Commit 55e8eacf authored by Bharath Ramsundar's avatar Bharath Ramsundar
Browse files

Code changes

parent 1b97292e
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -39,6 +39,11 @@ class SequentialGraph(object):
  def add(self, layer):
    """Adds a new layer to model."""
    with self.graph.as_default():
      ############################################# DEBUG
      #print("start - add()")
      #print("self.output")
      #print(self.output)
      ############################################# DEBUG
      # For graphical layers, add connectivity placeholders 
      if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']:
        if (len(self.layers) > 0 and hasattr(self.layers[-1], "__name__")):
@@ -49,6 +54,11 @@ class SequentialGraph(object):
                            self.graph_topology.get_topology_placeholders())
      else:
        self.output = layer(self.output)
      ############################################# DEBUG
      #print("end- add()")
      #print("self.output")
      #print(self.output)
      ############################################# DEBUG

      # Add layer to the layer list
      self.layers.append(layer)
+5 −6
Original line number Diff line number Diff line
@@ -133,18 +133,17 @@ class MultitaskGraphClassifier(Model):

  def build(self):
    """Construct the per-task classification heads of the graph model.

    Creates the label/weight feed placeholders, pulls the feature tensor
    out of the underlying graph model, and attaches one logit head per
    task via ``model_ops.multitask_logits``.

    Returns
    -------
    output: list of Tensors
      One logits tensor per task, as produced by
      ``model_ops.multitask_logits`` — consumed downstream for loss
      construction.  (Exact shape depends on ``multitask_logits``; not
      visible from here.)
    """
    # Feed targets for training.  Labels are fed as booleans (binary
    # classification per task); weights are per-example, per-task floats.
    self.label_placeholder = tf.placeholder(
        dtype='bool', shape=(None, self.n_tasks), name="label_placeholder")
    # NOTE(review): "weight_placholder" is a typo in the graph-node name.
    # Kept byte-for-byte because external code may look the tensor up by
    # name ("weight_placholder:0") — confirm before renaming.
    self.weight_placeholder = tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="weight_placholder")

    # Feature tensor(s) computed by the wrapped sequential graph model.
    feat = self.model.return_outputs()
    # One independent logits head per task.
    output = model_ops.multitask_logits(feat, self.n_tasks)
    return output

+5 −2
Original line number Diff line number Diff line
@@ -334,8 +334,11 @@ class Dense(Layer):
    output = model_ops.dot(x, self.W)
    if self.bias:
      output += self.b
    outputs = to_list(self.activation(output))
    return outputs
    ################################################# DEBUG
    #outputs = to_list(self.activation(output))
    #return outputs
    return output
    ################################################# DEBUG


class Dropout(Layer):
+14 −11
Original line number Diff line number Diff line
@@ -821,16 +821,16 @@ class LSTMStep(Layer):

    # Taken from Keras code [citation needed]
    ####################################################### DEBUG
    print("x")
    print(x)
    print("self.W")
    print(self.W)
    print("h_tm1")
    print(h_tm1)
    print("self.U")
    print(self.U)
    print("self.b")
    print(self.b)
    #print("x")
    #print(x)
    #print("self.W")
    #print(self.W)
    #print("h_tm1")
    #print(h_tm1)
    #print("self.U")
    #print(self.U)
    #print("self.b")
    #print(self.b)
    ####################################################### DEBUG
    z = model_ops.dot(x, self.W) + model_ops.dot(h_tm1, self.U) + self.b

@@ -846,4 +846,7 @@ class LSTMStep(Layer):

    h = o * self.activation(c)

    return o, [h, c]
    ####################################################### DEBUG
    #return o, [h, c]
    return h, [h, c]
    ####################################################### DEBUG
+5 −18
Original line number Diff line number Diff line
@@ -352,24 +352,6 @@ def epsilon():
  return 1e-7


#def variable(value, dtype=tf.float32, name=None):
#  """Instantiates a variable and returns it.
#
#  Parameters
#  ----------
#  value: Numpy array, initial value of the tensor.
#  dtype: Tensor type.
#  name: Optional name string for the tensor.
#
#  Returns
#  -------
#  A variable instance (with Keras metadata included).
#  """
#  v = tf.Variable(value, dtype=dtype, name=name)
#  v._uses_learning_phase = False
#  return v


def random_uniform_variable(shape,
                            low,
                            high,
@@ -840,6 +822,11 @@ def fully_connected_layer(tensor,
  ValueError
    If input tensor is not 2D.
  """
  ###################################################### DEBUG
  #print("fully_connected_layer")
  #print("tensor")
  #print(tensor)
  ###################################################### DEBUG
  if len(tensor.get_shape()) != 2:
    raise ValueError('Dense layer input must be 2D, not %dD' %
                     len(tensor.get_shape()))