I have the following model in TensorFlow:
def output_layer(input_layer, num_labels):
    '''
    :param input_layer: 2D tensor
    :param num_labels: int. How many output labels in total? (10 for cifar10 and 100 for cifar100)
    :return: output layer Y = WX + B
    '''
    input_dim = input_layer.get_shape().as_list()[-1]
    fc_w = create_variables(name='fc_weights', shape=[input_dim, num_labels],
                            initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    fc_b = create_variables(name='fc_bias', shape=[num_labels], initializer=tf.zeros_initializer())
    fc_h = tf.matmul(input_layer, fc_w) + fc_b
    return fc_h
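(create_variables is not defined in the snippet; the following is a minimal sketch of what such a helper typically looks like in TF 1.x code, an assumption since the original helper is not shown.)

import tensorflow as tf

def create_variables(name, shape, initializer):
    # Plausible stand-in: a thin wrapper around tf.get_variable so that
    # variables are created (or reused) inside the current variable_scope.
    return tf.get_variable(name, shape=shape, initializer=initializer)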
def model(input_features):
    with tf.variable_scope("GRU"):
        cell1 = tf.nn.rnn_cell.GRUCell(gru1_cell_size)
        cell2 = tf.nn.rnn_cell.GRUCell(gru2_cell_size)
        mcell = tf.nn.rnn_cell.MultiRNNCell([cell1, cell2], state_is_tuple=False)
        # shape = (?, 64 + 32)
        initial_state = tf.placeholder(shape=[None, gru1_cell_size + gru2_cell_size],
                                       dtype=tf.float32, name="initial_state")
        output, new_state = tf.nn.dynamic_rnn(mcell, input_features, dtype=tf.float32,
                                              initial_state=initial_state)
    with tf.variable_scope("output_reshaped"):
        # before: shape (34, 1768, 32); after: shape (34 * 1768, 32)
        output = tf.reshape(output, shape=[-1, gru2_cell_size])
    with tf.variable_scope("output_layer"):
        # shape: (34 * 1768, 3)
        predictions = output_layer(output, num_labels)
        predictions = tf.reshape(predictions, shape=[-1, 100, 3])
    return predictions, initial_state, new_state, output
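(For context: a stateful model like this is normally driven by feeding the new_state returned for one batch back in as initial_state for the next. A minimal sketch, assuming input_features is a placeholder and batch1/batch2 are NumPy arrays of shape (34, 100, 200); these names are illustrative.)

import numpy as np

predictions, initial_state, new_state, output = model(input_features)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Zero state for the very first batch: shape (34, 64 + 32).
    state = np.zeros((34, gru1_cell_size + gru2_cell_size), dtype=np.float32)
    for batch in (batch1, batch2):
        # The new_state of one batch becomes the initial_state of the next.
        preds, state = sess.run([predictions, new_state],
                                feed_dict={input_features: batch,
                                           initial_state: state})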
So, as you can see from the code, the cell size of the first GRU is 64 and the cell size of the second GRU is 32. The batch size is 34 (but that doesn't matter to me), and the input features have size 200. I tried to compute the gradients of the loss as follows:
local_grads_and_vars = optimizer.compute_gradients(loss, tf.trainable_variables())
# Only the gradients are taken, so they can later be added to the
# back-propagated gradients from the previous batch.
local_grads = [grad for grad, var in local_grads_and_vars]
for v in local_grads:
    print("v", v)
After printing the gradients I get the following:
v Tensor("Optimizer/gradients/GRU_Layer1/rnn/while/gru_cell/MatMul/Enter_grad/b_acc_3:0", shape=(264, 128), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer1/rnn/while/gru_cell/BiasAdd/Enter_grad/b_acc_3:0", shape=(128,), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer1/rnn/while/gru_cell/MatMul_1/Enter_grad/b_acc_3:0", shape=(264, 64), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer1/rnn/while/gru_cell/BiasAdd_1/Enter_grad/b_acc_3:0", shape=(64,), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer2/rnn/while/gru_cell/MatMul/Enter_grad/b_acc_3:0", shape=(96, 64), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer2/rnn/while/gru_cell/BiasAdd/Enter_grad/b_acc_3:0", shape=(64,), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer2/rnn/while/gru_cell/MatMul_1/Enter_grad/b_acc_3:0", shape=(96, 32), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer2/rnn/while/gru_cell/BiasAdd_1/Enter_grad/b_acc_3:0", shape=(32,), dtype=float32)
v Tensor("Optimizer/gradients/output_layer/MatMul_grad/tuple/control_dependency_1:0", shape=(32, 3), dtype=float32)
v Tensor("Optimizer/gradients/output_layer/add_grad/tuple/control_dependency_1:0", shape=(3,), dtype=float32)
Suppose I save the gradients after training the model on the first mini-batch, that is, after feeding a tensor of shape (34, 100, 200) as input_features (the argument of the model function) and getting an output of shape (34 * 100, 3). After that, how can I back-propagate these gradients on the second mini-batch?
2 Answers
From the documentation of tf.gradients, your grad_ys should be a list of the same length as the input ys. I was able to run code along the following lines:
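(A minimal sketch of the grad_ys mechanism on a toy graph; the toy loss y = x * x and the names here are illustrative, not the answerer's original code.)

import tensorflow as tf

# Toy graph: y = x^2, so dy/dx = 2x.
x = tf.placeholder(tf.float32, shape=[None])
y = x * x

# grad_ys must match ys in length and shape; it seeds the backward pass,
# so the result is grad_ys * dy/dx rather than just dy/dx.
seed = tf.placeholder(tf.float32, shape=[None])
grads = tf.gradients(ys=[y], xs=[x], grad_ys=[seed])

with tf.Session() as sess:
    # With seed = 1 this is the plain gradient 2x; other seeds scale it.
    print(sess.run(grads, feed_dict={x: [3.0], seed: [1.0]}))  # [array([6.], ...)]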
Here is a solution with custom code:
This is an example of two GRUs stacked one after the other, where I back-propagate in two different ways, based on the code in optimize().
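(The idea, as a minimal runnable sketch with a toy loss standing in for the question's network: the gradients saved from the previous batch are fed back in through placeholders and added to the current batch's gradients before applying them. All names here are illustrative.)

import numpy as np
import tensorflow as tf

# Toy stand-in for the question's network; in the real model, `loss`
# would come from `predictions` and `x` would be `input_features`.
x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
w = tf.get_variable("w", shape=[3], initializer=tf.zeros_initializer())
loss = tf.reduce_mean(tf.square(tf.reduce_sum(x * w, axis=1) - 1.0))

optimizer = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = optimizer.compute_gradients(loss, tf.trainable_variables())
grads = [g for g, v in grads_and_vars]
tvars = [v for g, v in grads_and_vars]

# One placeholder per variable, to feed the previous batch's gradients back in.
prev_grads = [tf.placeholder(tf.float32, shape=v.get_shape()) for v in tvars]
summed = [g + pg for g, pg in zip(grads, prev_grads)]
train_op = optimizer.apply_gradients(zip(summed, tvars))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch1 = np.random.rand(34, 3).astype(np.float32)
    batch2 = np.random.rand(34, 3).astype(np.float32)
    zeros = [np.zeros(v.get_shape().as_list(), np.float32) for v in tvars]

    # Batch 1: nothing to carry over yet, so feed zeros; save the raw gradients.
    feed = {x: batch1}
    feed.update(zip(prev_grads, zeros))
    saved, _ = sess.run([grads, train_op], feed_dict=feed)

    # Batch 2: add batch 1's saved gradients before applying the update.
    feed = {x: batch2}
    feed.update(zip(prev_grads, saved))
    sess.run(train_op, feed_dict=feed)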