I am a beginner at deep learning programming, and I have been stuck on this problem for two days straight. I have seen many similar questions, but I still have not solved mine and I need your help. I tried to debug the dimensions of my tensors, but I get the error message shown below.

(Network layers)

        with self._model_variable_scope():
            if self.data_format == 'channels_first':
                # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).
                # This provides a large performance boost on GPU. See
                # https://www.tensorflow.org/performance/performance_guide#data_formats
                self.input_imgs = tf.transpose(self.input_imgs, [0, 3, 1, 2])

            self.input_imgs = conv2d_fixed_padding(
                inputs=self.input_imgs, filters=self.num_filters, kernel_size=self.kernel_size,
                strides=self.conv_stride, data_format=self.data_format)
            self.input_imgs = tf.identity(self.input_imgs, 'initial_conv')

            # We do not include batch normalization or activation functions in V2
            # for the initial conv1 because the first ResNet unit will perform these
            # for both the shortcut and non-shortcut paths as part of the first
            # block's projection. Cf. Appendix of [2].
            if self.resnet_version == 1:
                self.input_imgs = batch_norm(self.input_imgs, self.training, self.data_format)
                self.input_imgs = tf.nn.relu(self.input_imgs)

            if self.first_pool_size:
                self.input_imgs = tf.layers.max_pooling2d(
                    inputs=self.input_imgs, pool_size=self.first_pool_size,
                    strides=self.first_pool_stride, padding='SAME',
                    data_format=self.data_format)
                self.input_imgs = tf.identity(self.input_imgs, 'initial_max_pool')

            for i, num_blocks in enumerate(self.block_sizes):
                num_filters = self.num_filters * (2 ** i)
                self.input_imgs = block_layer(
                    inputs=self.input_imgs, filters=num_filters, bottleneck=self.bottleneck,
                    block_fn=self.block_fn, blocks=num_blocks,
                    strides=self.block_strides[i], training=self.training,
                    name='block_layer{}'.format(i + 1), data_format=self.data_format)

            # Only apply the BN and ReLU for model that does pre_activation in each
            # building/bottleneck block, eg resnet V2.
            if self.pre_activation:
                self.input_imgs = batch_norm(self.input_imgs, self.training, self.data_format)
                self.input_imgs = tf.nn.relu(self.input_imgs)

            # The current top layer has shape
            # `batch_size x pool_size x pool_size x final_size`.
            # ResNet does an Average Pooling layer over pool_size,
            # but that is the same as doing a reduce_mean. We do a reduce_mean
            # here because it performs better than AveragePooling2D.
            axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
            self.input_imgs = tf.reduce_mean(self.input_imgs, axes, keep_dims=True)
            self.input_imgs = tf.identity(self.input_imgs, 'final_reduce_mean')

            self.input_imgs = tf.reshape(self.input_imgs, [-1, self.final_size])
            self.input_imgs = tf.layers.dense(inputs=self.input_imgs, units=self.num_classes)
            self.final_output = tf.identity(self.input_imgs, 'final_dense')
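
For reference, this is how I understand the final stage is supposed to map shapes, written as a tiny standalone sketch (TF 1.x assumed; the sizes here are placeholders for illustration, not the ones from my model):

import tensorflow as tf  # assumes TensorFlow 1.x

# Hypothetical NHWC feature map entering the final stage (sizes chosen only
# for illustration: batch 100, 8x8 spatial, 64 channels, 10 classes).
feature_map = tf.placeholder(tf.float32, [100, 8, 8, 64])

# Global average pooling done as a reduce_mean over the spatial axes (channels_last).
pooled = tf.reduce_mean(feature_map, [1, 2], keep_dims=True)  # (100, 1, 1, 64)
flat = tf.reshape(pooled, [-1, 64])                           # (100, 64)
logits = tf.layers.dense(inputs=flat, units=10)               # (100, 10)

print(pooled.get_shape(), flat.get_shape(), logits.get_shape())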

(Placeholder and loss setup methods)

def model_setup(self):
    with tf.variable_scope("Model") as scope:
        self.input_imgs = tf.placeholder(tf.float32, [self.batch_size, self.img_height, self.img_width, self.img_depth])
        self.input_labels = tf.placeholder(tf.int32, [self.batch_size])

        if (self.dataset == 'cifar-10'):
            self.cifar_model_setup()
        else:
            print("No such dataset exist. Exiting the program")
            sys.exit()

    self.model_vars = tf.trainable_variables()
    for var in self.model_vars: print(var.name, var.get_shape())
    self.do_setup = False

def loss_setup(self):
    self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.input_imgs, labels=self.input_labels, name="Error_loss")
    self.loss = tf.reduce_mean(self.loss)

    optimizer = tf.train.AdamOptimizer(0.001, beta1=0.5)
    self.loss_optimizer = optimizer.minimize(self.loss)

    # Defining the summary ops
    self.cl_loss_summ = tf.summary.scalar("cl_loss", self.loss)
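
As far as I understand, tf.nn.sparse_softmax_cross_entropy_with_logits expects logits of shape (batch_size, num_classes) and integer labels of shape (batch_size,). A minimal standalone check with dummy data (TF 1.x assumed):

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x

logits = tf.placeholder(tf.float32, [100, 10])  # (batch_size, num_classes)
labels = tf.placeholder(tf.int32, [100])        # (batch_size,)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))

with tf.Session() as sess:
    print(sess.run(loss, feed_dict={logits: np.zeros((100, 10), np.float32),
                                    labels: np.zeros(100, np.int32)}))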

(Training -> the error occurs here)

            for itr in range(0, int(self.num_images / self.batch_size)):
                imgs = self.train_images[itr * self.batch_size:(itr + 1) * (self.batch_size)]
                labels = self.train_labels[itr * self.batch_size:(itr + 1) * (self.batch_size)]

                _, summary_str, cl_loss_temp = sess.run([self.loss_optimizer, self.cl_loss_summ, self.loss],
                                                        feed_dict={self.input_imgs: imgs,
                                                                   self.input_labels: labels})

Error message:

ValueError: Cannot feed value of shape (100, 32, 32, 3) for Tensor 'Model/resnet_model/dense/BiasAdd:0', which has shape '(100, 10)'

I have already checked the network output self.final_output while debugging, and it shows a shape of (100, 10). Please let me know what is causing the problem. I would appreciate your patience, considering that I am a beginner.
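
Below is the kind of check I can add while debugging, reduced to a standalone sketch (TF 1.x assumed; the layers and names here are illustrative, not my actual model). It prints the name and shape of the graph tensor that a Python variable currently refers to:

import tensorflow as tf  # assumes TensorFlow 1.x

# Illustrative only: a placeholder followed by a dense layer, with the same
# Python variable reassigned to each layer's output, as in the code above.
x = tf.placeholder(tf.float32, [100, 32, 32, 3])
print(x.name, x.get_shape())   # e.g. Placeholder:0 (100, 32, 32, 3)

x = tf.reshape(x, [-1, 32 * 32 * 3])
x = tf.layers.dense(inputs=x, units=10)
print(x.name, x.get_shape())   # e.g. dense/BiasAdd:0 (100, 10)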