> Source: Author

> Source: Author

def generate_data(m=0.1, b=0.3, n=200):
    """Generate n noisy samples from the line y = m*x + b.

    Args:
        m: slope of the underlying line.
        b: intercept of the underlying line.
        n: number of samples to draw.

    Returns:
        Tuple (x, y) of float32 numpy arrays, each of shape (n,).
    """
    x = np.random.uniform(-10, 10, n)
    noise = np.random.normal(0, 0.15, n)
    y = (m * x + b) + noise
    # float32 so the data feeds directly into TF/PyTorch models below.
    return x.astype(np.float32), y.astype(np.float32)

x, y = generate_data()

# Visualize the raw samples.  (Typographic "smart quotes" from the
# article extraction replaced with plain ASCII quotes -- the originals
# were a SyntaxError.)
plt.figure(figsize=(12, 5))
ax = plt.subplot(111)
ax.scatter(x, y, c="b", label="samples")

Tensorflow动态模型

class LinearRegressionKeras(tf.keras.Model):
    """Linear regression y = w*x + b with explicitly managed variables."""

    def __init__(self):
        super().__init__()
        # Bug fix: the original passed positional args after the `shape=`
        # keyword (`tf.random.uniform(shape=[1], -0.1, 0.1)`), which is a
        # SyntaxError; the bounds must be named minval/maxval.
        self.w = tf.Variable(tf.random.uniform(shape=[1], minval=-0.1, maxval=0.1))
        self.b = tf.Variable(tf.random.uniform(shape=[1], minval=-0.1, maxval=0.1))

    def __call__(self, x):
        # NOTE(review): overriding __call__ (instead of Keras's `call`)
        # bypasses keras.Model machinery; kept as-is to match the article.
        return x * self.w + self.b

Pytorch动态模型

class LinearRegressionPyTorch(torch.nn.Module):
    """Linear regression y = x @ w + b with explicit Parameters."""

    def __init__(self):
        super().__init__()
        # Parameters initialized uniformly in [-0.1, 0.1], mirroring the
        # TensorFlow model; w is (1, 1) so forward() can use matmul.
        self.w = torch.nn.Parameter(torch.Tensor(1, 1).uniform_(-0.1, 0.1))
        self.b = torch.nn.Parameter(torch.Tensor(1).uniform_(-0.1, 0.1))

    def forward(self, x):
        # x is expected as a (batch, 1) tensor -- TODO confirm with caller.
        return x @ self.w + self.b

Tensorflow训练循环

def squared_error(y_pred, y_true):
    """Mean squared error between predictions and targets (TensorFlow)."""
    # Bug fix: the extracted source used an en-dash (–) instead of the
    # ASCII minus, which is a SyntaxError.
    return tf.reduce_mean(tf.square(y_pred - y_true))

tf_model = LinearRegressionKeras()
[w, b] = tf_model.trainable_variables

# NOTE(review): `epochs` is defined elsewhere in the article.  The
# GradientTape / parameter-update step also appears to have been lost in
# extraction -- as written, this loop only evaluates and prints the loss.
for epoch in range(epochs):
    predictions = tf_model(x)
    loss = squared_error(predictions, y)
    if epoch % 20 == 0:
        print(f"Epoch {epoch} : Loss {loss.numpy()}")

Pytorch训练循环

def squared_error(y_pred, y_true):
    """Mean squared error between predictions and targets (PyTorch).

    NOTE(review): the body was lost in extraction; reconstructed to
    mirror the TensorFlow squared_error defined earlier in the article.
    """
    return torch.mean(torch.square(y_pred - y_true))

torch_model = LinearRegressionPyTorch()
[w, b] = torch_model.parameters()

# NOTE(review): `epochs`, `inputs`, and `labels` are defined elsewhere in
# the article.  The zero_grad / manual parameter-update step appears to
# have been lost in extraction -- as written, gradients accumulate but
# the weights are never updated.
for epoch in range(epochs):
    y_pred = torch_model(inputs)
    loss = squared_error(y_pred, labels)
    loss.backward()
    if epoch % 20 == 0:
        print(f"Epoch {epoch} : Loss {loss.data}")

Pytorch和Tensorflow模型重用可用层

class LinearRegressionKeras(tf.keras.Model):
    """Linear regression built from a reusable Keras Dense layer."""

    def __init__(self):
        super().__init__()
        # A single unit with no activation is exactly y = w*x + b.
        self.linear = tf.keras.layers.Dense(1, activation=None)  # , input_shape=[1]

    def call(self, x):
        return self.linear(x)

class LinearRegressionPyTorch(torch.nn.Module):
    """Linear regression built from a reusable torch.nn.Linear layer."""

    def __init__(self):
        super(LinearRegressionPyTorch, self).__init__()
        # 1 input feature -> 1 output feature, with bias: y = w*x + b.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        return self.linear(x)

Tensorflow训练循环，易于拟合方法

tf_model_train_loop = LinearRegressionKeras()
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)

# NOTE(review): `learning_rate` and `epochs` come from earlier in the
# article.  The GradientTape + optimizer.apply_gradients step appears
# lost in extraction -- the optimizer is created but never stepped here.
for epoch in range(epochs * 3):
    # Dense expects a (batch, features) input, so reshape (200,) -> (200, 1).
    x_batch = tf.reshape(x, [200, 1])
    y_pred = tf_model_train_loop(x_batch)
    y_pred = tf.reshape(y_pred, [200])
    loss = tf.losses.mse(y_pred, y)
    if epoch % 20 == 0:
        print(f"Epoch {epoch} : Loss {loss.numpy()}")

torch_model = LinearRegressionPyTorch()
criterion = torch.nn.MSELoss(reduction='mean')
optimizer = torch.optim.SGD(torch_model.parameters(), lr=learning_rate)

# NOTE(review): `learning_rate`, `epochs`, `inputs`, `labels` are defined
# elsewhere in the article.
for epoch in range(epochs * 3):
    y_pred = torch_model(inputs)
    loss = criterion(y_pred, labels)
    # NOTE(review): optimizer.zero_grad() is missing, so gradients
    # accumulate across iterations; likely lost in extraction.
    loss.backward()
    optimizer.step()
    if epoch % 20 == 0:
        print(f"Epoch {epoch} : Loss {loss.data}")

[w_tf, b_tf] = tf_model_fit.trainable_variables
[w2_tf, b2_tf] = tf_model_train_loop.trainable_variables
[w_torch, b_torch] = torch_model.parameters()

# Dense stores its kernel as (1, 1); flatten to (1,) for plotting.
w_tf = tf.reshape(w_tf, [1])
w2_tf = tf.reshape(w2_tf, [1])

# Compare the three fitted lines against the raw samples.
# NOTE(review): `tf_model_fit` and `inputs` are defined elsewhere in the
# article (the model.fit() section is not visible in this extract).
plt.figure(figsize=(12, 5))
ax = plt.subplot(111)
ax.scatter(x, y, c="b", label="samples")
ax.plot(x, w_tf * x + b_tf, "r", linewidth=5.0, label="tensorflow fit")
ax.plot(x, w2_tf * x + b2_tf, "y", linewidth=5.0, label="tensorflow train loop")
ax.plot(x, w_torch * inputs + b_torch, "c", linewidth=5.0, label="pytorch")
ax.legend()
plt.xlabel("x1")
plt.ylabel("y", rotation=0)

> Source: Author

PyTorch 和新的 TensorFlow 2.x 都支持动态图与自动微分这两项核心功能，可以对计算图中使用的所有参数求梯度。你可以轻松地在 Python 中实现训练循环，配合任意损失函数和梯度下降优化器。为了专注于两个框架之间真正的核心差异，我们在上面的示例中自己实现了简单的 MSE 损失和朴素的 SGD，以简化对比。

> Source: Author

(本文由闻数起舞翻译自Jacopo Mangiavacchi的文章《A tale of two frameworks: PyTorch vs. TensorFlow》，转载请注明出处，原文链接：

https://medium.com/data-science-at-microsoft/a-tale-of-two-frameworks-pytorch-vs-tensorflow-f73a975e733d)