# 一步步带你探究如何高效使用TensorFlow

+关注继续查看

Tensorflow基础知识：

Tensorflow和其他数字计算库（如numpy）之间最明显的区别在于Tensorflow中操作的是符号。这是一个强大的功能，这保证了Tensorflow可以做很多其他库（例如numpy）不能完成的事情（例如自动区分）。这可能也是它更复杂的原因。今天我们来一步步探秘Tensorflow，并为更有效地使用Tensorflow提供了一些指导方针和最佳实践。

import numpy as np

# numpy is eager: every expression is evaluated immediately, so `z`
# already holds concrete numbers by the time it is printed.
x = np.random.normal(size=[10, 10])
y = np.random.normal(size=[10, 10])
z = x @ y  # matrix product, identical to np.dot for 2-D arrays
print(z)


import tensorflow as tf
# Unlike numpy, these calls only build symbolic graph nodes; no numbers
# are computed at this point.
x = tf.random_normal([10, 10])
y = tf.random_normal([10, 10])
z = tf.matmul(x, y)
# Values only exist once a session executes the graph.
sess = tf.Session()
z_val = sess.run(z)
print(z_val)


Tensor("MatMul:0", shape=(10, 10), dtype=float32)

import numpy as np
import tensorflow as tf

# Fit y = w0*x^2 + w1*x + w2 to samples of the curve y = 5x^2 + 3.
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
w = tf.get_variable("w", shape=[3, 1])

# Feature matrix [x^2, x, 1] stacked along axis 1 -> shape [batch, 3].
f = tf.stack([tf.square(x), x, tf.ones_like(x)], 1)
yhat = tf.squeeze(tf.matmul(f, w), 1)

# L2 loss plus a small L2 penalty on the weights.
loss = tf.nn.l2_loss(yhat - y) + 0.1 * tf.nn.l2_loss(w)

# The original snippet used train_op without defining it; an optimizer
# step is required for the training loop below to run at all.
train_op = tf.train.AdamOptimizer(0.1).minimize(loss)


def generate_data():
    """Sample 100 points from the target curve y = 5x^2 + 3."""
    x_val = np.random.uniform(-10.0, 10.0, size=100)
    y_val = 5 * np.square(x_val) + 3
    return x_val, y_val


sess = tf.Session()
sess.run(tf.global_variables_initializer())
for _ in range(1000):
    x_val, y_val = generate_data()
    _, loss_val = sess.run([train_op, loss], {x: x_val, y: y_val})
    print(loss_val)
print(sess.run([w]))


[4.9924135, 0.00040895029, 3.4504161]

Tensorflow中的张量在图构造期间具有静态的形状属性。例如，我们可以定义一个形状为 [None, 128] 的张量：

import tensorflow as tf

# tf.placeholder takes the dtype as its first argument; the original
# snippet passed the shape where the dtype belongs, which fails at
# graph-construction time.
a = tf.placeholder(tf.float32, [None, 128])

# Static shape: known at graph-construction time (None = unknown dim).
static_shape = a.get_shape().as_list()  # returns [None, 128]

# Dynamic shape: a tensor whose value is only known at run time.
dynamic_shape = tf.shape(a)

# Pin the previously-unknown first dimension to 32.
a.set_shape([32, 128])

# Equivalent effect expressed as a reshape (returns a new tensor).
a = tf.reshape(a, [32, 128])

def get_shape(tensor):
    """Return the shape of `tensor` as a list, preferring static dims.

    Each entry is a Python int when the dimension is statically known,
    otherwise the corresponding scalar tensor from tf.shape(), so the
    result mixes compile-time and run-time information.
    """
    static_shape = tensor.get_shape().as_list()
    dynamic_shape = tf.unstack(tf.shape(tensor))
    dims = [s[1] if s[0] is None else s[0]
            for s in zip(static_shape, dynamic_shape)]
    return dims


# Collapse the last two dimensions of a [batch, 10, 32] tensor into one.
# (Original snippet called a bare `placeholder` and `get_shape(tensor)`
# on an undefined name.)
b = tf.placeholder(tf.float32, [None, 10, 32])
shape = get_shape(b)
b = tf.reshape(b, [shape[0], shape[1] * shape[2]])


import tensorflow as tf
import numpy as np


def reshape(tensor, dims_list):
    """Reshape `tensor`, optionally merging groups of dimensions.

    Each entry of `dims_list` is either an int (keep that dimension) or
    a list/tuple of ints (collapse those dimensions into one). Products
    of statically-known dims are computed eagerly with numpy; otherwise
    a graph node is emitted.
    """
    shape = get_shape(tensor)
    dims_prod = []
    for dims in dims_list:
        if isinstance(dims, int):
            dims_prod.append(shape[dims])
        elif all([isinstance(shape[d], int) for d in dims]):
            dims_prod.append(np.prod([shape[d] for d in dims]))
        else:
            # tf.prod does not exist; tf.reduce_prod is the graph-time
            # product of the (possibly dynamic) dimension sizes.
            dims_prod.append(tf.reduce_prod([shape[d] for d in dims]))
    tensor = tf.reshape(tensor, dims_prod)
    return tensor


# Merge dims 1 and 2 of a [batch, 10, 32] tensor -> [batch, 320] using
# the helper above (the original called tf.reshape by mistake).
b = tf.placeholder(tf.float32, [None, 10, 32])
b = reshape(b, [0, [1, 2]])


Tensorflow同样支持广播机制。当要执行加法和乘法运算时，你需要确保操作数的形状匹配，例如，你不能将形状为[32]的张量加到形状为[3,4]的张量上。但有一个特殊情况：当其中一个操作数在某个维度上的大小为1时，Tensorflow会隐式地将该张量沿这个维度广播（复制），使其形状与另一个操作数匹配。例如：

import tensorflow as tf
a = tf.constant([[1., 2.], [3., 4.]])
b = tf.constant([[1.], [2.]])
# Implicit broadcasting: b ([2, 1]) is stretched along its size-1 axis;
# equivalent to c = a + tf.tile(b, [1, 2])
c = a + b


# a: [5, 3, 5] and b: [5, 1, 6] -- concat needs matching sizes on every
# axis except the concat axis, so b is explicitly tiled along dim 1.
a = tf.random_uniform([5, 3, 5])
b = tf.random_uniform([5, 1, 6])
# concat a and b and apply nonlinearity
tiled_b = tf.tile(b, [1, 3, 1])
c = tf.concat([a, tiled_b], 2)
d = tf.layers.dense(c, 10, activation=tf.nn.relu)


# More efficient alternative to tile+concat+dense: project a and b
# separately and let broadcasting align the [5, 3, 10] and [5, 1, 10]
# projections in the addition, avoiding the explicit tile.
pa = tf.layers.dense(a, 10, activation=None)
pb = tf.layers.dense(b, 10, activation=None)
d = tf.nn.relu(pa + pb)


def tile_concat_dense(a, b, units, activation=tf.nn.relu):
    """Dense layer over the (broadcast) combination of `a` and `b`.

    Equivalent to tiling `b` to match `a`, concatenating, and applying a
    dense layer, but cheaper: each input is projected separately and the
    projections are added with broadcasting.

    Args:
        a, b: tensors whose shapes are broadcast-compatible after the
            per-input dense projections.
        units: output width of the dense projections.
        activation: nonlinearity applied to the sum, or None for linear.
    Returns:
        The (optionally activated) sum of the two projections.
    """
    pa = tf.layers.dense(a, units, activation=None)
    pb = tf.layers.dense(b, units, activation=None)
    c = pa + pb
    if activation is not None:
        c = activation(c)
    return c

# Pitfall: a is [2, 1] and b is [2]; broadcasting silently expands the
# sum to the [2, 2] matrix [[2., 3.], [3., 4.]], so c evaluates to 12 --
# likely not what the author of "a + b" intended.
a = tf.constant([[1.], [2.]])
b = tf.constant([1., 2.])
c = tf.reduce_sum(a + b)


# Passing the reduction axis explicitly makes the broadcast visible to
# the reader: summing [[2., 3.], [3., 4.]] over axis 0 yields [5., 7.].
a = tf.constant([[1.], [2.]])
b = tf.constant([1., 2.])
c = tf.reduce_sum(a + b, 0)


import numpy as np
import tensorflow as tf
import uuid


def relu(inputs):
    """ReLU implemented as a python op with a custom registered gradient.

    tf.py_func ops have no gradient by default; this wraps the forward
    pass and registers a python-side gradient via gradient_override_map.
    (The scraped original was missing the gradient adapter and the
    registration; reconstructed here.)
    """
    # Define the op in python
    def _relu(x):
        return np.maximum(x, 0.)

    # Define the op's gradient in python
    def _relu_grad(x):
        return np.float32(x > 0)

    # Adapter exposing the python gradient with TF's (op, grad) signature.
    def _relu_grad_op(op, grad):
        x = op.inputs[0]
        x_grad = grad * tf.py_func(_relu_grad, [x], tf.float32)
        return x_grad

    # Register the gradient with a unique id
    grad_name = "MyReluGrad_" + str(uuid.uuid4())
    tf.RegisterGradient(grad_name)(_relu_grad_op)

    # Override the gradient of the custom op
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": grad_name}):
        output = tf.py_func(_relu, [inputs], tf.float32)
    return output


x = tf.random_normal([10])
y = relu(x * x)

# Numerically check the custom gradient: compute_gradient_error compares
# the registered analytic gradient against a finite-difference estimate;
# a small value means the override is consistent. (The `with` body had
# lost its indentation in the scraped original.)
with tf.Session():
    diff = tf.test.compute_gradient_error(x, [10], y, [10])
    print(diff)


# Log raw input images to TensorBoard via the built-in image summary op.
image = tf.placeholder(tf.float32)
tf.summary.image("image", image)


import io
import matplotlib.pyplot as plt
import numpy as np
import PIL
import tensorflow as tf


def visualize_labeled_images(images, labels, max_outputs=3, name='image'):
    """Draw images with their labels via matplotlib and log the renders
    as a TensorBoard image summary.

    Args:
        images: batch of images, indexed as images[i].
        labels: per-image labels (anything str() can format).
        max_outputs: number of examples from the batch to draw.
        name: summary name.
    Returns:
        The image summary op.
    """
    def _visualize_image(image, label):
        # Do the actual drawing in python
        fig = plt.figure(figsize=(3, 3), dpi=80)
        # The scraped original used `ax` without creating it; an axes
        # object is required for imshow/text.
        ax = fig.add_subplot(111)
        ax.imshow(image[::-1, ...])
        ax.text(0, 0, str(label),
                horizontalalignment='left',
                verticalalignment='top')
        fig.canvas.draw()
        # Write the plot as a memory file.
        buf = io.BytesIO()
        fig.savefig(buf, format='png')
        plt.close(fig)  # avoid leaking one open figure per image
        buf.seek(0)
        # Read the image back and convert to a numpy array.
        img = PIL.Image.open(buf)
        return np.array(img.getdata()).reshape(img.size[0], img.size[1], -1)

    def _visualize_images(images, labels):
        # Only display the given number of examples in the batch
        outputs = []
        for i in range(max_outputs):
            output = _visualize_image(images[i], labels[i])
            outputs.append(output)
        return np.array(outputs, dtype=np.uint8)

    # Run the python op.
    figs = tf.py_func(_visualize_images, [images, labels], tf.uint8)
    return tf.summary.image(name, figs)


|
6月前
|

PyTorch基础知识(超基础)
PyTorch基础知识(超基础)
94 0
|
PyTorch 算法框架/工具
pytorch使用方法积累
1. net.parameters()查看网络参数 2. torch.optim.lr_scheduler.MultiStepLR 2.1 学习率的参数配置
62 0
|

PyTorch学习系列教程：构建一个深度学习模型需要哪几步？

444 0
|

513 0
|

263 0
|

201 0
|

Tensorflow 2.0的这些新设计，你适应好了吗？

1300 0
|

[译] 写给机器学习工程师：如何测试Tensorflow模型
## 引言 这篇文章来自斯坦福大学计算与数学工程所（Institute for Computational & Mathematical Engineering）博士生Guillaume Genthial的[博客](https://guillaumegenthial.github.io/testing.html)。主要介绍了如何将工程界里已经得到充分认可的单元测试实践应用到算法建模的领域中，
1905 0