with slim.arg_scope(resnet_arg_scope(is_training=(is_training and not_freezed[2]))):
    C4, _ = resnet_v1.resnet_v1(C3,
                                blocks[2:3],
                                global_pool=False,
                                include_root_block=False,
                                scope=scope_name)
# add_heatmap(C4, name='Layer/C4')
# C4 = tf.Print(C4, [tf.shape(C4)], summarize=10, message='C4_shape')
# upsample C2 (stride 4) and C3 (stride 2) so they can be fused with C4
feature_c2 = slim.conv2d_transpose(C2,
                                   1024,
                                   [3, 3],
                                   stride=4,
                                   padding='SAME')

feature_c3 = slim.conv2d_transpose(C3,
                                   1024,
                                   [3, 3],
                                   stride=2,
                                   padding='SAME')

# concatenate the three feature maps along the channel axis, then fuse with a 3x3 conv
feature_to_cropped = tf.concat([feature_c2, feature_c3, C4], axis=3)
feature_to_cropped = slim.conv2d(feature_to_cropped, 1024, [3, 3], 1)
print(C4)
print(feature_to_cropped)
return feature_to_cropped

I want to fuse the feature maps from consecutive layers. I checked the shapes: feature_c2 and feature_c3 are [1, height, width, 1024], and C4 is [1, height, width, 1024]. I concatenate them and run the forward pass, expecting the returned result to be [1, height, width, 1024], so why does it expect the channels to be in dim 2?

InvalidArgumentError (see above for traceback): ConcatOp : Dimensions of inputs should match: shape[0] = [1,1024,300,476] vs. shape[1] = [1,1024,76,120]
	 [[Node: concat = ConcatV2[N=3, T=DT_FLOAT, Tidx=DT_INT32, _device="/job:localhost/replica:0/task:0/device:GPU:0"](Conv2d_transpose/Relu, Conv2d_transpose_1/Relu, resnet_v1_101_3/block3/unit_23/bottleneck_v1/Relu, postprocess_RPN/ones/Const)]]
	 [[Node: postprocess_fastrcnn/GatherV2_20/_1405 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_4116_postprocess_fastrcnn/GatherV2_20", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
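
To illustrate where the error comes from, here is a minimal standalone sketch (not part of the original model code; the NHWC layout and the resize-based alignment are my assumptions). tf.concat only joins tensors along the chosen axis and requires every other dimension to match exactly, so two branches with different spatial sizes cannot be concatenated on axis 3 until they are brought to a common height and width:

import tensorflow as tf  # TF 1.x, same API family as the slim code above

# Hypothetical feature maps with mismatching spatial sizes (NHWC assumed).
a = tf.zeros([1, 300, 476, 1024])
b = tf.zeros([1, 76, 120, 1024])

# tf.concat(..., axis=3) requires dims 0, 1 and 2 to be identical,
# so concatenating a and b directly raises the same ConcatOp error:
# bad = tf.concat([a, b], axis=3)

# One possible workaround (an assumption, not necessarily the right fix here):
# resize both branches to a shared spatial size before concatenating.
target_hw = tf.shape(b)[1:3]                        # height/width of the smaller map
a_resized = tf.image.resize_bilinear(a, target_hw)
merged = tf.concat([a_resized, b], axis=3)          # shape [1, 76, 120, 2048]

with tf.Session() as sess:
    print(sess.run(tf.shape(merged)))               # -> [  1  76 120 2048]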