Commit eda8ed1a authored by miaecle's avatar miaecle
Browse files

update samples

parent 64d593ee
Loading
Loading
Loading
Loading
+1 −7
Original line number Diff line number Diff line
@@ -33,16 +33,10 @@ model = dc.models.ProgressiveMultitaskRegressor(
    weight_init_stddevs=[.02] * n_layers,
    bias_init_consts=[1.] * n_layers,
    learning_rate=.001,
    penalty=.0001,
    penalty_type="l2",
    optimizer="adam",
    batch_size=100,
    seed=123,
    verbosity="high")
    batch_size=100)

# Fit trained model
model.fit(train_dataset)
model.save()

print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
+13 −11
Original line number Diff line number Diff line
@@ -33,12 +33,15 @@ for trial in range(num_trials):
  n_layers = 3
  nb_epoch = 50
  model = dc.models.ProgressiveMultitaskRegressor(
      len(FACTORS_tasks), train_dataset.get_data_shape()[0],
      layer_sizes=[750]*n_layers, dropouts=[.25]*n_layers,
      alpha_init_stddevs=[.02]*n_layers, weight_init_stddevs=[.02]*n_layers,
      bias_init_consts=[1.]*n_layers, learning_rate=.0003,
      penalty=.0001, penalty_type="l2", optimizer="adam", batch_size=100,
      logdir="FACTORS_tf_progressive")
      len(FACTORS_tasks),
      train_dataset.get_data_shape()[0],
      layer_sizes=[750] * n_layers,
      dropouts=[.25] * n_layers,
      alpha_init_stddevs=[.02] * n_layers,
      weight_init_stddevs=[.02] * n_layers,
      bias_init_consts=[1.] * n_layers,
      learning_rate=.0003,
      batch_size=100)

  #Use R2 classification metric
  metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
@@ -54,9 +57,8 @@ for trial in range(num_trials):
  test_score, test_task_scores = model.evaluate(
      test_dataset, [metric], transformers, per_task_metrics=True)

  all_results.append((train_score, train_task_scores,
                      valid_score, valid_task_scores,
                      test_score, test_task_scores))
  all_results.append((train_score, train_task_scores, valid_score,
                      valid_task_scores, test_score, test_task_scores))

  print("Scores for trial %d" % trial)
  print("----------------------------------------------------------------")
@@ -76,8 +78,8 @@ for trial in range(num_trials):
print("####################################################################")

for trial in range(num_trials):
  (train_score, train_task_scores, valid_score, valid_task_scores,
   test_score, test_task_scores) = all_results[trial]
  (train_score, train_task_scores, valid_score, valid_task_scores, test_score,
   test_task_scores) = all_results[trial]

  print("Scores for trial %d" % trial)
  print("----------------------------------------------------------------")
+0 −1
Original line number Diff line number Diff line
@@ -36,7 +36,6 @@ model = dc.models.ProgressiveMultitaskRegressor(

# Fit trained model
model.fit(train_dataset, nb_epoch=25)
model.save()

print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)
+1 −5
Original line number Diff line number Diff line
@@ -41,11 +41,7 @@ for trial in range(num_trials):
      weight_init_stddevs=[.02] * n_layers,
      bias_init_consts=[1.] * n_layers,
      learning_rate=.0003,
      penalty=.0001,
      penalty_type="l2",
      optimizer="adam",
      batch_size=100,
      logdir="KAGGLE_tf_progressive")
      batch_size=100)

  #Use R2 classification metric
  metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
+13 −11
Original line number Diff line number Diff line
@@ -34,12 +34,15 @@ for trial in range(num_trials):
  n_layers = 3
  nb_epoch = 50
  model = dc.models.ProgressiveMultitaskRegressor(
      len(KINASE_tasks), train_dataset.get_data_shape()[0],
      layer_sizes=[50]*n_layers, dropouts=[.25]*n_layers,
      alpha_init_stddevs=[.02]*n_layers, weight_init_stddevs=[.02]*n_layers,
      bias_init_consts=[1.]*n_layers, learning_rate=.0003,
      penalty=.0001, penalty_type="l2", optimizer="adam", batch_size=100,
      logdir="KINASE_tf_progressive")
      len(KINASE_tasks),
      train_dataset.get_data_shape()[0],
      layer_sizes=[50] * n_layers,
      dropouts=[.25] * n_layers,
      alpha_init_stddevs=[.02] * n_layers,
      weight_init_stddevs=[.02] * n_layers,
      bias_init_consts=[1.] * n_layers,
      learning_rate=.0003,
      batch_size=100)

  #Use R2 classification metric
  metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
@@ -55,9 +58,8 @@ for trial in range(num_trials):
  test_score, test_task_scores = model.evaluate(
      test_dataset, [metric], transformers, per_task_metrics=True)

  all_results.append((train_score, train_task_scores,
                      valid_score, valid_task_scores,
                      test_score, test_task_scores))
  all_results.append((train_score, train_task_scores, valid_score,
                      valid_task_scores, test_score, test_task_scores))

  print("Scores for trial %d" % trial)
  print("----------------------------------------------------------------")
@@ -77,8 +79,8 @@ for trial in range(num_trials):
print("####################################################################")

for trial in range(num_trials):
  (train_score, train_task_scores, valid_score, valid_task_scores,
   test_score, test_task_scores) = all_results[trial]
  (train_score, train_task_scores, valid_score, valid_task_scores, test_score,
   test_task_scores) = all_results[trial]

  print("Scores for trial %d" % trial)
  print("----------------------------------------------------------------")
Loading