masked_select(input, mask): selects elements through a boolean (ByteTensor) mask, as in the a[a > 0] example above
nonzero(input): returns the indices of the non-zero elements

These functions are not covered in detail here; consult the official API documentation when you need them.
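A minimal sketch of the two selection functions (the tensor values below are made up for illustration):

import torch

a = torch.tensor([[-1.0, 2.0], [0.0, 3.0]])

# masked_select returns a 1-D Tensor of the elements where the mask is True.
mask = a > 0
print(torch.masked_select(a, mask))  # tensor([2., 3.]), same as a[a > 0]

# nonzero returns the indices of the non-zero elements, one row per element.
print(torch.nonzero(a))              # tensor([[0, 0], [0, 1], [1, 1]])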
Use view() to change the shape of a Tensor:

In [3]: y = x.view(6)
z = x.view(-1, 2)  # -1 is inferred from the other dimensions
print(x.size(), y.size(), z.size())
Out [3]: torch.Size([2, 3]) torch.Size([6]) torch.Size([3, 2])
Note that the new Tensor returned by view() shares its data with the source Tensor (it is in fact a view of the same underlying storage), so changing one also changes the other.
In [4]: x += 1
print(x)
print(y)  # y also changed, since it shares data with x
Out [4]: tensor([[...],
        [1.4564, 1.3117, 1.5181]])

If you want a genuine copy that does not share data, clone the Tensor first and then call view():

In [5]: x_cp = x.clone().view(6)
x -= 1
print(x)     # back to the original values
print(x_cp)  # the clone keeps the incremented values
Out [5]: tensor([[...],
        [0.4564, 0.3117, 0.5181]])
When two Tensors of different shapes are combined element-wise, a broadcasting mechanism may kick in: the data is copied as needed so that the two Tensors have the same shape, and the operation is then applied element-wise. For example:

x = torch.arange(1, 3).view(1, 2)
print(x)
y = torch.arange(1, 4).view(3, 1)
print(y)
print(x + y)

tensor([[1, 2]])
tensor([[1],
        [2],
        [3]])
tensor([[2, 3],
        [3, 4],
        [4, 5]])
Since x has 1 row and 2 columns while y has 3 rows and 1 column, computing x + y broadcasts (copies) the 2 elements of x's first row down to the second and third rows, and the 3 elements of y's first column across to the second column, so that two 3 × 2 matrices can then be added element-wise.
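To make the copying explicit, the broadcast can be reproduced by hand with expand; this is a sketch reusing the x and y above, not part of the original example:

x_b = x.expand(3, 2)  # the single row of x repeated 3 times
y_b = y.expand(3, 2)  # the single column of y repeated 2 times
print(torch.equal(x + y, x_b + y_b))  # True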
Converting a Tensor to a NumPy array with numpy() likewise shares the underlying memory:

a = torch.ones(3)
b = a.numpy()
print(a, b)
a += 1
print(a, b)
b += 1
print(a, b)

tensor([1., 1., 1.]) [1. 1. 1.]
tensor([2., 2., 2.]) [2. 2. 2.]
tensor([3., 3., 3.]) [3. 3. 3.]
In the other direction, torch.from_numpy() converts a NumPy array into a Tensor that shares its memory, whereas torch.tensor() always copies the data, so the returned Tensor no longer shares memory with the original array:
In [1]: import numpy as np
a = np.ones(3)
b = torch.from_numpy(a)
print(a, b)
a += 1
print(a, b)
b += 1
print(a, b)
Out [1]: [1. 1. 1.] tensor([1., 1., 1.], dtype=torch.float64)
[2. 2. 2.] tensor([2., 2., 2.], dtype=torch.float64)
[3. 3. 3.] tensor([3., 3., 3.], dtype=torch.float64)
In addition, torch.tensor() can convert a NumPy array to a Tensor; as noted above, it copies the data instead of sharing it:
In [2]: c = torch.tensor(a)
a += 1
print(a, c)
Out [2]: [4. 4. 4.] tensor([3., 3., 3.], dtype=torch.float64)
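One way to verify the sharing/copying difference directly is NumPy's shares_memory check; a sketch under the assumption that a is the NumPy array from above:

print(np.shares_memory(a, torch.from_numpy(a).numpy()))  # True: same buffer
print(np.shares_memory(a, torch.tensor(a).numpy()))      # False: torch.tensor copied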
Tensors can be moved between devices with the to() method. The following code runs only when a CUDA GPU is available:

if torch.cuda.is_available():
    device = torch.device("cuda")
    y = torch.ones_like(x, device=device)  # create a Tensor directly on the GPU
    x = x.to(device)                       # equivalent to x.to("cuda")
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))       # to() can also change the dtype

tensor([[2., 3.]], device='cuda:0')
tensor([[2., 3.]], dtype=torch.float64)

Here to() moved data between the GPU and the CPU and changed the data type at the same time.
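A common device-agnostic pattern (a sketch, not from the text above) picks the device once and falls back to the CPU when no GPU is present:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x = torch.tensor([[1.0, 2.0]], device=device)  # created directly on the chosen device
y = torch.ones_like(x)                         # lands on the same device as x
print((x + y).to("cpu"))                       # move back to CPU, e.g. before .numpy()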
In deep learning we often need to compute the gradient of a function. PyTorch's autograd package automatically builds a computation graph from the inputs and the forward computation and performs backpropagation accordingly. This section shows how to use autograd for automatic gradient computation.
Setting a Tensor's requires_grad attribute to True makes PyTorch track every operation performed on it. To stop a Tensor from being tracked, call .detach() to separate it from the recorded graph; alternatively, wrap code in a with torch.no_grad(): block so that the operations inside are not tracked, which is common when evaluating a model.
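A minimal sketch of the three mechanisms (variable names are illustrative):

t = torch.ones(2, requires_grad=True)  # start tracking operations on t
u = (t * 2).sum()
print(u.requires_grad)                 # True: u is part of the graph

d = t.detach()                         # same data, but cut off from the graph
print(d.requires_grad)                 # False

with torch.no_grad():                  # operations in this block are not tracked
    w = (t * 2).sum()
print(w.requires_grad)                 # False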
x = torch.ones(2, 2, requires_grad=True)
print(x)
print(x.grad_fn)

tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
None

y = x + 2
print(y)
print(y.grad_fn)

tensor([[3., 3.],
        [3., 3.]], grad_fn=<AddBackward0>)
<AddBackward0 object at 0x7fab97795828>

x was created directly, so its grad_fn is None; y was created by an addition, so it carries an AddBackward0 grad_fn.
z = y * y * 3
out = z.mean()
print(z, out)

tensor([[27., 27.],
        [27., 27.]], grad_fn=<MulBackward0>) tensor(27., grad_fn=<MeanBackward0>)
The requires_grad attribute defaults to False and can be flipped in place with requires_grad_():

a = torch.randn(2, 2)  # requires_grad is False by default
a = ((a * 3) / (a - 1))
print(a.requires_grad) # False
a.requires_grad_(True)
print(a.requires_grad) # True
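Once tracking is enabled, the results of subsequent operations carry a grad_fn; a short sketch continuing from a above (the exact object address will differ per run):

b = (a * a).sum()
print(b.grad_fn)  # e.g. <SumBackward0 object at 0x...>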
Because out is a scalar, calling out.backward() needs no arguments (it is equivalent to out.backward(torch.tensor(1.))):

out.backward()
print(x.grad)

tensor([[4.5000, 4.5000],
        [4.5000, 4.5000]])

To see why, write out as $o$. Since $y = x + 2$ and $z = 3y^2$, we have $o = \frac{1}{4}\sum_i 3(x_i + 2)^2$, and therefore $\frac{\partial o}{\partial x_i}\Big|_{x_i = 1} = \frac{3}{2}(x_i + 2) = 4.5$.
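As a sanity check on the 4.5, a single partial derivative can be approximated numerically; this finite-difference snippet is an illustration, not part of the original text:

def f(t):
    return (3 * (t + 2) ** 2).mean()  # same function of x as out above

eps = 1e-4
x0 = torch.ones(2, 2)
e = torch.zeros(2, 2)
e[0, 0] = eps                               # perturb one coordinate only
print((f(x0 + e) - f(x0 - e)) / (2 * eps))  # ~4.5, matching x.grad[0, 0]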
Note that grad accumulates: every backward pass adds its result to whatever gradient is already stored, so gradients are normally cleared before a new pass. Running a second backward pass without clearing shows the accumulation:

out2 = x.sum()
out2.backward()
print(x.grad)

tensor([[5.5000, 5.5000],
        [5.5000, 5.5000]])
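To start fresh, clear the stored gradient before the next pass; a sketch continuing from x above, using the in-place zero_():

out3 = x.sum()
x.grad.data.zero_()  # clear the accumulated gradient first
out3.backward()
print(x.grad)        # tensor([[1., 1.], [1., 1.]])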
Mathematically, if $\vec{y} = f(\vec{x})$ is a function whose input and output are both vectors, the gradient of $\vec{y}$ with respect to $\vec{x}$ is the Jacobian matrix

$$J = \begin{pmatrix} \frac{\partial y_1}{\partial x_1} & \cdots & \frac{\partial y_1}{\partial x_n} \\ \vdots & \ddots & \vdots \\ \frac{\partial y_m}{\partial x_1} & \cdots & \frac{\partial y_m}{\partial x_n} \end{pmatrix}.$$

What torch.autograd computes are vector-Jacobian products. If $v$ is the gradient of a scalar function $l = g(\vec{y})$, that is $v = \left( \frac{\partial l}{\partial y_1} \; \cdots \; \frac{\partial l}{\partial y_m} \right)$, then by the chain rule the gradient of $l$ with respect to $\vec{x}$ is

$$v \cdot J = \left( \frac{\partial l}{\partial x_1} \; \cdots \; \frac{\partial l}{\partial x_n} \right).$$

This is why backward() on a non-scalar Tensor requires a weight Tensor of the same shape: the weighted sum first reduces the output to a scalar.
In [1]: x = torch.tensor([1.0, 2.0, 3.0, 4.0], requires_grad=True)
y = 2 * x
z = y.view(2, 2)
In [2]: v = torch.tensor([[1.0, 0.1], [0.01, 0.001]], dtype=torch.float)
z.backward(v)
print(x.grad)

tensor([2.0000, 0.2000, 0.0200, 0.0020])
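Passing v to backward() is equivalent to backpropagating the scalar (z * v).sum(); a sketch of that equivalence on a fresh leaf Tensor (so gradients do not accumulate into the x above):

x2 = torch.tensor([1.0, 2.0, 3.0, 4.0], requires_grad=True)
z2 = (2 * x2).view(2, 2)
(z2 * v).sum().backward()  # scalar surrogate for z.backward(v)
print(x2.grad)             # tensor([2.0000, 0.2000, 0.0200, 0.0020])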