60分钟入门PyTorch (PyTorch in 60 minutes)

1
2
3
import pandas as pd #data analysis (not used in the snippets shown here)
import numpy as np #scientific computing / NumPy interop
import torch
1
torch.empty(5,3)  # allocate a 5x3 tensor WITHOUT initializing memory — values are arbitrary garbage
1
2
3
4
5
6
tensor(1.00000e-13 *
       [[ 0.0000,  0.0000,  0.0000],
        [ 0.0000,  0.0015,  0.0000],
        [ 0.0000,  0.0000,  0.0000],
        [ 0.0000,  0.0000,  0.0000],
        [ 3.2014,  0.0000,  0.0000]])
1
torch.rand(5,3)  # 5x3 tensor of uniform random values drawn from [0, 1)
1
2
3
4
5
tensor([[ 0.8019,  0.8890,  0.8821],
        [ 0.2941,  0.4110,  0.9602],
        [ 0.9998,  0.7436,  0.7854],
        [ 0.6871,  0.3687,  0.3176],
        [ 0.0838,  0.4599,  0.9497]])
1
2
# A 5x3 tensor filled with zeros, using 64-bit integer entries.
x = torch.zeros((5, 3), dtype=torch.long)
print(x)
1
2
3
4
# Re-create x as a 5x3 tensor of ones; new_* methods take sizes and
# inherit properties (e.g. device) from the source tensor unless overridden.
x = x.new_ones((5, 3), dtype=torch.double)
print(x)
# randn_like copies x's shape; dtype=torch.float overrides the float64 above.
x = torch.randn_like(x, dtype=torch.float)
print(x.size())
1
2
3
4
5
6
tensor([[ 1.,  1.,  1.],
        [ 1.,  1.,  1.],
        [ 1.,  1.,  1.],
        [ 1.,  1.,  1.],
        [ 1.,  1.,  1.]], dtype=torch.float64)
torch.Size([5, 3])

Tensor operations

1
2
3
4
5
6
7
8
9
# Several equivalent ways to add two tensors.
x = torch.rand(5, 3)
y = torch.rand(5, 3)
total = x + y                # operator form
print(total)
print(torch.add(x, y))       # functional form, same result
result = torch.empty(5, 3)
torch.add(x, y, out=result)  # write into a pre-allocated output tensor
print(result)
x.add_(y)                    # trailing underscore = in-place: mutates x itself
print(x)
1
2
3
# Print the tensor, its transpose, and a flattened 15-element view
# (view shares the same underlying storage — it does not copy).
for variant in (x, x.t(), x.view(15)):
    print(variant)
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
tensor([[ 0.4151,  1.7114,  0.6965],
        [ 1.1274,  0.6298,  1.3228],
        [ 1.0572,  1.5267,  1.6896],
        [ 0.7696,  0.6144,  0.9410],
        [ 1.2071,  1.1116,  0.9585]])
tensor([[ 0.4151,  1.1274,  1.0572,  0.7696,  1.2071],
        [ 1.7114,  0.6298,  1.5267,  0.6144,  1.1116],
        [ 0.6965,  1.3228,  1.6896,  0.9410,  0.9585]])
tensor([ 0.4151,  1.7114,  0.6965,  1.1274,  0.6298,  1.3228,  1.0572,
         1.5267,  1.6896,  0.7696,  0.6144,  0.9410,  1.2071,  1.1116,
         0.9585])

Converting to/from NumPy

1
2
3
4
5
# A NumPy array and the tensor built from it share one memory buffer:
# an in-place update of the array is visible through the tensor.
a = np.ones(5)
b = torch.from_numpy(a)  # b aliases a's storage
a += 1                   # in-place increment of the shared buffer
print(a)
print(b)                 # shows the update too — memory is shared
1
2
[2. 2. 2. 2. 2.]
tensor([ 2.,  2.,  2.,  2.,  2.], dtype=torch.float64)

Autograd (automatic differentiation)

1
2
3
4
5
6
# Autograd demo: build a tiny graph and backpropagate through it.
x = torch.ones(2, 2, requires_grad=True)
out = x + 2
print(out.grad_fn)            # produced by an op, so it carries a grad_fn
loss = (out * out * 3).mean() # scalar: mean of 3*(x+2)^2
loss.backward()               # fills x.grad with d(loss)/dx
print(x.grad)                 # 6*(x+2)/4 = 4.5 everywhere
1
2
3
<AddBackward0 object at 0x000002862DC79518>
tensor([[ 4.5000,  4.5000],
        [ 4.5000,  4.5000]])
1
2
# Inside torch.no_grad(), new results do not track gradients,
# even when their inputs have requires_grad=True.
with torch.no_grad():
    squared = x ** 2
    print(squared.requires_grad)  # False: autograd recording is disabled here
1
False