# Download the memory_util helper (one-off "install") and use it to capture
# the stderr output TensorFlow produces while running an op.
import urllib.request

import tensorflow as tf  # was missing: tf is used below but never imported here

# NOTE(review): this downloads and then imports remote code — only run on a
# trusted network; pin a commit hash instead of `master` if it matters.
response = urllib.request.urlopen(
    "https://raw.githubusercontent.com/yaroslavvb/memory_util/master/memory_util.py"
)
# Context manager closes the file deterministically (the original leaked the
# handle until garbage collection).
with open("memory_util.py", "wb") as f:
    f.write(response.read())

import memory_util

sess = tf.Session()
a = tf.random_uniform((1000,))
b = tf.random_uniform((1000,))
c = a + b

# Everything written to the process-level stderr during the run is captured.
with memory_util.capture_stderr() as stderr:
    sess.run(c.op)

print(stderr.getvalue())
import os
import sys

# POSIX file-descriptor numbers for the standard output streams.
STDOUT = 1
STDERR = 2


class FDRedirector(object):
    """Capture stdout or stderr at the OS level using file descriptors.

    The target descriptor is duplicated onto the write end of a pipe, so
    output produced by C extensions (which bypasses ``sys.stdout`` /
    ``sys.stderr``) is captured as well.
    """

    def __init__(self, fd=STDOUT):
        """fd is the file descriptor to capture; use STDOUT or STDERR."""
        self.fd = fd
        self.started = False
        self.piper = None
        self.pipew = None

    def start(self):
        """Begin redirecting the descriptor into an internal pipe."""
        if self.started:
            return
        # Keep a duplicate of the real descriptor so it can be restored.
        self.oldhandle = os.dup(self.fd)
        self.piper, self.pipew = os.pipe()
        os.dup2(self.pipew, self.fd)
        # self.fd is now the only handle we need on the pipe's write end.
        os.close(self.pipew)
        self.started = True

    def flush(self):
        """Flush the captured output, like the flush method of any stream."""
        if self.fd == STDOUT:
            sys.stdout.flush()
        elif self.fd == STDERR:
            sys.stderr.flush()

    def stop(self):
        """Undo the redirection and return everything captured so far."""
        if not self.started:
            return ''
        self.flush()
        # Restoring the descriptor closes the last write end of the pipe,
        # so the read() below sees EOF instead of blocking.
        os.dup2(self.oldhandle, self.fd)
        os.close(self.oldhandle)
        with os.fdopen(self.piper, 'r') as reader:
            output = reader.read()
        self.started = False
        return output

    def getvalue(self):
        """Return output captured since the last getvalue() or start()."""
        output = self.stop()
        self.start()
        return output
import tensorflow as tf

# Demo: tf.Print() writes to the process-level stderr (invisible inside a
# notebook), so capture it with FDRedirector defined above.
x = tf.constant([1, 2, 3])
a = tf.Print(x, [x])

redirect = FDRedirector(STDERR)
sess = tf.InteractiveSession()

redirect.start()
a.eval()
# Python 3 print() calls — the original used Python 2 print statements,
# inconsistent with the rest of this file (and invalid on Python 3).
print("Result")
print(redirect.stop())
0
我遇到了同样的问题并通过在我的笔记本中使用这样的函数来解决它:
def tf_print(tensor, transform=None):
    """Attach a print side effect to `tensor` without changing its value.

    A tf.py_func node prints the tensor's runtime value (optionally passed
    through `transform` first, which usually condenses it into something
    readable). The returned tensor is an identity of the input that carries
    a control dependency on the print op, so evaluating it triggers the print.
    """
    def _print_value(x):
        # x arrives here as a numpy array, so any transformation is possible.
        print(x if transform is None else transform(x))
        return x

    print_op = tf.py_func(_print_value, [tensor], [tensor.dtype])[0]
    # Make evaluation of the result force the print to run first.
    with tf.control_dependencies([print_op]):
        passthrough = tf.identity(tensor)
    return passthrough
# Now define a tensor and use the tf_print function much like the tf.identity function
tensor = tf_print(tf.random_normal([100, 100]), transform=lambda x: [np.min(x), np.max(x)])
# This will print the transformed version of the tensors actual value
# (which was summarized to just the min and max for brevity)
sess = tf.InteractiveSession()
sess.run([tensor])
sess.close()
仅供参考：在我的自定义函数里改用 Logger 而不是直接调用 print 起到了关键作用——stdout 通常会被 Jupyter 缓冲，导致 "Loss is Nan" 之前的输出没有及时显示出来，而在我的场景中，及时看到这些输出正是使用该函数的首要目的。
8
您可以检查启动 jupyter notebook 的终端以查看消息 .
import tensorflow as tf
tf.InteractiveSession()
a = tf.constant(1)
b = tf.constant(2)
opt = a + b
opt = tf.Print(opt, [opt], message="1 + 2 = ")
opt.eval()
在终端,我可以看到:
2018-01-02 23:38:07.691808: I tensorflow/core/kernels/logging_ops.cc:79] 1 + 2 = [3]
5 回答
Update Feb 3, 2017 我把它包装成了memory_util包 . 用法示例
** 老东西**
您可以从IPython核心重用FD redirector . (Mark Sandler的想法)
我遇到了同样的问题并通过在我的笔记本中使用这样的函数来解决它:
仅供参考：在我的自定义函数里改用 Logger 而不是直接调用 print 起到了关键作用——stdout 通常会被 Jupyter 缓冲，导致 "Loss is Nan" 之前的输出没有及时显示出来，而在我的场景中，及时看到这些输出正是使用该函数的首要目的。
您可以检查启动
jupyter notebook
的终端以查看消息。在终端里，我可以看到：
一个简单的方法,在常规python中尝试过,但还没有jupyter .
os.dup2(sys.stdout.fileno(), 1)
os.dup2(sys.stdout.fileno(), 2)
解释如下:In python, how to capture the stdout from a c++ shared library to a variable
我遇到的问题是：无论是 sess.run(opt) 还是 opt.eval()，这些方案都不能满足我的需求。最好的办法是使用 tf.Print() 并把日志输出重定向到外部文件。我用一个临时文件来实现，再把它转存为普通文件；之后在评估阶段我这样使用：