我知道这个问题已被问到很多,但我无法弄清楚解决方案 . 我已经为图像构建了一个cnn和preproccecing,一切正常,直到我在 sess.run 中设置了 feed_dict 参数 . 我有24个 class ,图像大小为(64,64) . imagepathslabels 都是路径列表 .

imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.int32)
image, label = tf.train.slice_input_producer([imagepaths, labels],
                                             shuffle=True) 
 #(here) i did decoding,resing and normalizing

X, Y = tf.train.batch([image, label], batch_size=batch_size,
                      capacity=batch_size * 8,
                      num_threads=4)
place_holder_X = tf.placeholder(tf.string,(128, 64, 64, 3), name="Input")
place_holder_Y = tf.placeholder(tf.int32,(128,None), name="Target")
with tf.Session() as sess:
    sess.run(init)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)   

    try:
        for step in range(1, num_steps+1):
        #print("step = ",step,"display step",display_step,end=' ')
            if coord.should_stop():
                break
            if step % display_step == 0:


                images, labels = sess.run([X, Y])
                _, loss, acc = sess.run([train_op, loss_op, accuracy], feed_dict={place_holder_X: images, place_holder_Y: labels})
                print("Step " + str(step) + ", Minibatch Loss= " + \
                      "{:.4f}".format(loss) + ", Training Accuracy= " + \
                      "{:.3f}".format(acc))
            else:
                # Only run the optimization op (backprop)

                sess.run(train_op)
    except Exception as e:

        coord.request_stop(e)
    finally:

        coord.request_stop()
        coord.join(threads)
    print("Optimization Finished!")
    print("Time taken: %f" % (time.time() - startTime))
    saver = tf.train.Saver()
    saver.save(sess, "./models1/my_tf_model.ckpt")