2020/12/10 14:53

# 出租车的故事

## 普通程序

# 方法一：函数法
def calculate_fee(distance_travelled):
    """Return the taxi fare in yuan: 10-yuan flag fare plus 2 yuan per kilometre."""
    # Indentation restored — the pasted snippet had the body at column 0.
    return 10 + 2 * distance_travelled


for x in [1.0, 3.0, 5.0, 9.0, 10.0, 20.0]:
    print(calculate_fee(x))


12.0
16.0
20.0
28.0
30.0
50.0

# 方法二：菜鸟法
# Method 2: beginner's approach — inline the fare formula directly in a loop.
Kilometers = [1.0, 3.0, 5.0, 9.0, 10.0, 20.0]
for i in Kilometers:
    # 10-yuan flag fare plus 2 yuan per kilometre (indentation restored).
    money = 10 + 2 * i
    print(money)

12.0
16.0
20.0
28.0
30.0
50.0


## 深度学习来帮忙

# 导入paddle和判断版本

paddle 2.0.0-rc0


## 编辑数据

# Training data: distances travelled (km) and the matching fares (yuan),
# as column vectors so each row is one sample for the Linear layer.
distance_rows = [[1.0], [3.0], [5.0], [9.0], [10.0], [20.0]]
fare_rows = [[12.0], [16.0], [20.0], [28.0], [30.0], [50.0]]

x_data = paddle.to_tensor(distance_rows)
y_data = paddle.to_tensor(fare_rows)

print("x_data = ", x_data)
print("y_data = ", y_data)

x_data =  Tensor(shape=[6, 1], dtype=float32, place=CPUPlace, stop_gradient=True,
[[1.],
[3.],
[5.],
[9.],
[10.],
[20.]])
y_data =  Tensor(shape=[6, 1], dtype=float32, place=CPUPlace, stop_gradient=True,
[[12.],
[16.],
[20.],
[28.],
[30.],
[50.]])


## 定义模型

y_predict = w * x + b

Out=XW+b

linear = paddle.nn.Linear(in_features=1, out_features=1)  # 定义初始化神经网络


### 查看初始化策略

w 的值会先进行随机生成

b 的值会先以0进行代替

# Inspect the layer's parameters before training: w is randomly
# initialised, b starts at zero.
initial_weight = linear.weight.numpy()
initial_bias = linear.bias.numpy()
w_before_opt = initial_weight.item()
b_before_opt = initial_bias.item()

print("w before optimize: {}".format(w_before_opt))
print("b before optimize: {}".format(b_before_opt))


w before optimize: 0.7223087549209595
b before optimize: 0.0


## 优化神经网络

# Plain SGD over the layer's parameters, and mean-squared-error loss.
sgd_optimizer = paddle.optimizer.SGD(
    learning_rate=0.001,
    parameters=linear.parameters(),
)
mse_loss = paddle.nn.MSELoss()


# 优化算法

total_epoch = 7000  # number of training epochs
for i in range(total_epoch):
    y_predict = linear(x_data)          # forward pass
    loss = mse_loss(y_predict, y_data)  # MSE against the true fares
    loss.backward()                     # back-propagate gradients
    sgd_optimizer.step()                # apply one SGD update
    # BUG FIX: without clear_grad() Paddle accumulates gradients across
    # epochs, so every step after the first uses a corrupted gradient.
    sgd_optimizer.clear_grad()

    if i % 1000 == 0:  # report progress every 1000 epochs
        print("epoch {} loss {}".format(i, loss.numpy()))

# Fixed a fullwidth comma (，) typo in the original format string.
print("finished training, loss {}".format(loss.numpy()))


epoch 0 loss [1.5586754e-07]
epoch 1000 loss [1.5586754e-07]
epoch 2000 loss [1.5586754e-07]
epoch 3000 loss [1.5586754e-07]
epoch 4000 loss [1.5586754e-07]
epoch 5000 loss [1.5586754e-07]
epoch 6000 loss [1.5586754e-07]
finished training, loss [1.5586754e-07]


## 白话时间

### 查看结果

# Inspect the learned parameters; for this data they should approach
# the true fare formula w = 2, b = 10.
trained_weight = linear.weight.numpy()
trained_bias = linear.bias.numpy()
w_after_opt = trained_weight.item()
b_after_opt = trained_bias.item()

print("w after optimize: {}".format(w_after_opt))
print("b after optimize: {}".format(b_after_opt))


w after optimize: 2.0000507831573486
b after optimize: 9.999356269836426


# Hello Paddle 2.0 !

print("Hello PalldPalld 2.0 !")

Hello Paddle 2.0 !


0
0 收藏

0 评论
0 收藏
0