Outline

  1. Pytorch Tutorial

Pytorch Tutorial

[1/4] What is Pytorch?

[2/4] Pytorch Tutorial

empty

import torch

x = torch.empty(5, 3)
print(x)
print(type(x))
out:

tensor([[ 4.8689e-36,  0.0000e+00,  5.6052e-45],
        [ 0.0000e+00,  1.4013e-45,  0.0000e+00],
        [ 1.4013e-45,  0.0000e+00, -2.0294e+00],
        [-3.0359e-01, -6.3788e-01,  1.1869e-01],
        [-2.8520e-01, -6.8363e-01, -4.3497e-01]])

<class 'torch.Tensor'>

torch.empty allocates memory without initializing it, so the values above are whatever happened to be in that memory.

random

x = torch.rand(5, 3)
print(x)
tensor([[0.7194, 0.6460, 0.8726],
        [0.3167, 0.1146, 0.4650],
        [0.5900, 0.7723, 0.1102],
        [0.3511, 0.8640, 0.3159],
        [0.3506, 0.8203, 0.9907]])

zeros

x = torch.zeros(5, 3, dtype=torch.long)
print(x)
tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])

tensor

x = torch.tensor([5.5, 3])
print(x)
tensor([5.5000, 3.0000])

new ones

x = x.new_ones(5, 3, dtype=torch.double)
print(x)
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]], dtype=torch.float64)

randn_like

x = torch.randn_like(x, dtype=torch.float)
print(x)
tensor([[-1.6818,  1.0321, -0.8268],
        [ 0.5849,  0.2614, -1.0141],
        [-1.3403,  0.0985, -2.0294],
        [-0.3036, -0.6379,  0.1187],
        [-0.2852, -0.6836, -0.4350]])

size

print(x.size())
torch.Size([5, 3])
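torch.Size is in fact a tuple, so it supports the usual tuple operations; a quick check (the variable names are illustrative):

rows, cols = x.size()   # tuple unpacking works on torch.Size
print(rows, cols)       # 5 3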

Addition

y = torch.rand(5, 3)
print(x + y)

print(torch.add(x, y))
tensor([[ 1.1330,  1.1318, -0.0505],
        [ 1.3939,  1.1446,  1.4834],
        [ 0.0519, -0.4940,  0.8106],
        [ 1.4957,  1.4173,  2.0778],
        [-0.7459,  0.8813, -0.7525]])
tensor([[ 1.1330,  1.1318, -0.0505],
        [ 1.3939,  1.1446,  1.4834],
        [ 0.0519, -0.4940,  0.8106],
        [ 1.4957,  1.4173,  2.0778],
        [-0.7459,  0.8813, -0.7525]])

result = torch.empty(5, 3)
torch.add(x, y, out=result)
print(result)
tensor([[ 1.1330,  1.1318, -0.0505],
        [ 1.3939,  1.1446,  1.4834],
        [ 0.0519, -0.4940,  0.8106],
        [ 1.4957,  1.4173,  2.0778],
        [-0.7459,  0.8813, -0.7525]])
# add x to y

y.add_(x)
print(y)
tensor([[ 1.1330,  1.1318, -0.0505],
        [ 1.3939,  1.1446,  1.4834],
        [ 0.0519, -0.4940,  0.8106],
        [ 1.4957,  1.4173,  2.0778],
        [-0.7459,  0.8813, -0.7525]])
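Methods with a trailing underscore mutate the tensor in place; a minimal sketch with two other standard in-place methods, copy_ and t_ (the tensors here are throwaway, not the x and y above):

x2 = torch.ones(3, 3)
y2 = torch.zeros(3, 3)
y2.copy_(x2)   # copies x2 into y2 in place
x2.t_()        # transposes x2 in place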

indexing

print(x)
print(x[:, 1])
tensor([[ 0.3303,  0.9045, -0.1993],
        [ 0.5441,  0.5543,  0.9550],
        [-0.4737, -0.6022,  0.5375],
        [ 1.3710,  0.9057,  1.3939],
        [-0.9738,  0.0332, -1.4871]])
tensor([ 0.9045,  0.5543, -0.6022,  0.9057,  0.0332])

view

x = torch.randn(4, 4)
y = x.view(16)      # flatten to one dimension
z = x.view(-1, 8)   # -1 is inferred from the other dimensions: 16 / 8 = 2
print(x.size(), y.size(), z.size())
torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])

item

x = torch.randn(1)
print(x)
print(x.item())  # .item() turns a one-element tensor into a plain Python number
tensor([-1.8750])
-1.874952793121338

compatibility with numpy

import numpy as np

a = np.ones(5)
b = torch.from_numpy(a)  # b shares a's memory
np.add(a, 1, out=a)      # modifying a in place also changes b
print(a)
print(b)
[2. 2. 2. 2. 2.]
tensor([2., 2., 2., 2., 2.], dtype=torch.float64)
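The conversion also works the other way, via the standard .numpy() method; a minimal sketch:

a = torch.ones(5)
b = a.numpy()   # b is a numpy view of the same memory
a.add_(1)       # the in-place add shows up in b too
print(a)        # tensor([2., 2., 2., 2., 2.])
print(b)        # [2. 2. 2. 2. 2.]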

[3/4] Autograd

requires_grad

x = torch.ones(2, 2, requires_grad=True)
print(x)
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)

a = torch.randn(2, 2)
a = ((a * 8) / (a - 2))
print(a.requires_grad)
a.requires_grad_(True)
print(a.requires_grad)
b = (a * a).sum()
print(b.grad_fn)
False
True
<SumBackward0 object at 0x7effda51c518>
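Tracking can also be switched off for a block of code with torch.no_grad(), a standard PyTorch context manager; a minimal sketch, reusing x from above:

with torch.no_grad():
    y = x + 2           # computed without building a graph
print(y.requires_grad)  # False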

grad_fn

y = x + 2
print(y)
tensor([[3., 3.],
        [3., 3.]], grad_fn=<AddBackward0>)

z = y * y * 3
out = z.mean()

print(z, out)
tensor([[27., 27.],
        [27., 27.]], grad_fn=<MulBackward0>)
tensor(27., grad_fn=<MeanBackward0>)

[4/4] Gradient

grad

out.backward()
print(x.grad)
tensor([[4.5000, 4.5000],
        [4.5000, 4.5000]])
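The 4.5 can be checked by hand: out = (1/4) Σᵢ zᵢ with zᵢ = 3(xᵢ + 2)², so ∂out/∂xᵢ = (3/2)(xᵢ + 2), and at xᵢ = 1 this is (3/2)·3 = 4.5.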

Jacobian matrix

torch.autograd
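For a non-scalar output, torch.autograd does not build the full Jacobian matrix: backward takes a vector argument v and computes the vector-Jacobian product vᵀ·J. A minimal sketch (the vector v is illustrative):

x = torch.randn(3, requires_grad=True)
y = x * 2                    # Jacobian of y w.r.t. x is 2·I
v = torch.tensor([0.1, 1.0, 0.0001])
y.backward(v)                # computes v^T · J
print(x.grad)                # equals 2 * v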

Summary

Having tried both tensorflow and pytorch, pytorch is definitely a bit quicker to understand, and the seamless conversion to and from numpy is a real plus.

The YouTube lecture series I am currently watching (모두를 위한 딥러닝 2, the pytorch edition of "Deep Learning for Everyone") seems like a good course for getting started with deep learning in pytorch. Professor Sung Kim (김성훈) also taught 모두를 위한 딥러닝 1 with tensorflow well, and it was enjoyable to follow.