
PyTorch Tutorial (3): Tensor Operations and Linear Regression


Summary:
torch.cat() does not add a new dimension to the tensor.
Function: concatenates tensors along dimension dim
· tensors: a sequence of tensors
· dim: the dimension to concatenate along
torch.chunk() splits a tensor into equal chunks along dimension dim.
torch.split() splits a tensor along a given dimension dim.
torch.index_select() indexes data along dimension dim using index (returns a new tensor assembled from the indexed data).
torch.masked_select() indexes by the True entries of mask (returns a 1-D tensor).
torch.reshape() changes the shape of a tensor. When the tensor is contiguous in memory, the new tensor shares data memory with input.
torch.transpose(input, dim0, dim1) swaps two dimensions of a tensor (dim0 and dim1 are the dimensions to swap).
torch.squeeze() removes axes of length 1 (dim: if None, all length-1 axes are removed; if a dimension is specified, it is removed only when its length is 1).
torch.add(input, other, alpha=1) computes input + alpha × other.
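torch.cat() has no dedicated example below, so here is a minimal sketch of the concatenation behavior described above (the tensor shapes are my own choice):

import torch

t = torch.ones((2, 3))
t_cat0 = torch.cat([t, t], dim=0)      # shape (4, 3): stacked along rows
t_cat1 = torch.cat([t, t, t], dim=1)   # shape (2, 9): stacked along columns
print(t_cat0.shape, t_cat1.shape)      # both still 2-D: cat adds no dimension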

1) torch.chunk()

Function: splits a tensor into equal chunks along dimension dim
Returns: a list of tensors
Note: if the size is not evenly divisible, the last chunk is smaller than the others
· input: the tensor to split
· chunks: the number of chunks
· dim: the dimension to split along

# torch.chunk
flag = True
# flag = False
if flag:
    a = torch.ones((2, 7))                             # 7 is not divisible by 3
    list_of_tensors = torch.chunk(a, dim=1, chunks=3)  # split into 3 chunks
    for idx, t in enumerate(list_of_tensors):
        print("Tensor #{}: {}, shape is {}".format(idx + 1, t, t.shape))

2) torch.split()

Function: splits a tensor along dimension dim
Returns: a list of tensors
· tensor: the tensor to split
· split_size_or_sections: if an int, the length of each chunk (in this case the behavior is close to torch.chunk() above); if a list, the tensor is split according to the list's elements, and the elements must sum to the tensor's length along dim, otherwise an error is raised (see the note after the code).
· dim: the dimension to split along

flag = True
# flag = False
if flag:
    t = torch.ones((2, 5))
    list_of_tensors = torch.split(t, 2, dim=1)  # chunk widths along dim=1: 2, 2, 1
    for idx, t in enumerate(list_of_tensors):
        print("Tensor #{}: {}, shape is {}".format(idx + 1, t, t.shape))

    # list_of_tensors = torch.split(t, [2, 1, 2], dim=1)
    # for idx, t in enumerate(list_of_tensors):
    #     print("Tensor #{}: {}, shape is {}".format(idx + 1, t, t.shape))
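One extra note on the list variant (my addition, not in the original post): the sections must sum exactly to t.size(dim), as stated above.

# [2, 1, 2] sums to 5 = t.size(1), so it yields pieces of widths 2, 1 and 2
# torch.split(t, [3, 3], dim=1) would raise a RuntimeError, since 3 + 3 != 5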

3) torch.index_select()

Function: indexes data along dimension dim using index
Returns: a new tensor assembled from the indexed data

flag = True
# flag = False
if flag:
    t = torch.randint(0, 9, size=(3, 3))
    idx = torch.tensor([0, 2], dtype=torch.long)    # index must be long, not float
    t_select = torch.index_select(t, dim=1, index=idx)
    print("t:\n{}\nt_select:\n{}".format(t, t_select))
'''
t:
tensor([[4, 5, 0],
        [5, 7, 1],
        [2, 5, 8]])
t_select:
tensor([[4, 0],
        [5, 1],
        [2, 8]])
'''
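Worth noting (my addition, not from the post): index_select returns a copy, not a view, so writing to the result leaves the source untouched:

t_select[0, 0] = 99
print(t[0, 0])   # unchanged: t_select does not share memory with t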

4) torch.masked_select()

Function: indexes by the True entries of mask
Returns: a 1-D tensor (the number of True entries varies, so the input shape cannot be preserved)

flag = True
# flag = False
if flag:
    t = torch.randint(0, 9, size=(3, 3))
    mask = t.ge(5)  # ge: greater or equal; gt: greater than; le: less or equal; lt: less than
    t_select = torch.masked_select(t, mask)
    print("t:\n{}\nmask:\n{}\nt_select:\n{}".format(t, mask, t_select))
'''
t:
tensor([[4, 5, 0],
        [5, 7, 1],
        [2, 5, 8]])
mask:
tensor([[False,  True, False],
        [ True,  True, False],
        [False,  True,  True]])
t_select:
tensor([5, 5, 7, 5, 8])
'''
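Equivalent shorthand (my note): boolean indexing with the mask gives the same 1-D result:

print(torch.equal(t[mask], t_select))   # True: t[mask] == torch.masked_select(t, mask)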

5) torch.reshape()

Function: changes the shape of a tensor; when the input is contiguous in memory, the new tensor shares data memory with input

flag = True
# flag = False
if flag:
    t = torch.randperm(8)
    t_reshape = torch.reshape(t, (-1, 2, 2))    # -1: this dimension is inferred (8 / (2*2) = 2)
    print("t:{}\nt_reshape:\n{}".format(t, t_reshape))

    t[0] = 1024
    print("t:{}\nt_reshape:\n{}".format(t, t_reshape))
    print("t.data memory address: {}".format(id(t.data)))
    print("t_reshape.data memory address: {}".format(id(t_reshape.data)))
'''
t:tensor([5, 4, 2, 6, 7, 3, 1, 0])
t_reshape:
tensor([[[5, 4],
         [2, 6]],

        [[7, 3],
         [1, 0]]])
t:tensor([1024,    4,    2,    6,    7,    3,    1,    0])
t_reshape:
tensor([[[1024,    4],
         [   2,    6]],

        [[   7,    3],
         [   1,    0]]])
t.data memory address: 1449817144920
t_reshape.data memory address: 1449817144920
'''
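The flip side of the sharing rule above (my sketch): when the input is not contiguous, reshape has to copy, so no memory is shared:

t = torch.arange(6).reshape(2, 3)
t_t = t.t()                      # transpose -> a non-contiguous view
r = torch.reshape(t_t, (6,))     # cannot be expressed as a view, so a copy is made
r[0] = -1
print(t[0, 0])                   # still 0: r does not share memory with t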

6) torch.transpose()

Function: swaps two dimensions of a tensor; dim0 and dim1 are the dimensions to swap

# torch.transpose
t = torch.rand((2, 3, 4))
print(t)
t_transpose = torch.transpose(t, dim0=1, dim1=2)    # e.g. c*h*w -> c*w*h
print("t shape:{}\nt_transpose shape: {}".format(t.shape, t_transpose.shape))

'''
tensor([[[0.7576, 0.2793, 0.4031, 0.7347],
         [0.0293, 0.7999, 0.3971, 0.7544],
         [0.5695, 0.4388, 0.6387, 0.5247]],

        [[0.6826, 0.3051, 0.4635, 0.4550],
         [0.5725, 0.4980, 0.9371, 0.6556],
         [0.3138, 0.1980, 0.4162, 0.2843]]])
t shape:torch.Size([2, 3, 4])
t_transpose shape: torch.Size([2, 4, 3])
'''
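A detail worth knowing (my addition): transpose() returns a strided view that shares memory with the input and is usually non-contiguous, which matters when you later call view():

print(t_transpose.is_contiguous())           # False: a strided view of t
t_flat = t_transpose.contiguous().view(-1)   # view() needs contiguity, so copy first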

7) torch.squeeze()

Function: removes axes of length 1; dim: if None, all length-1 axes are removed; if a dimension is specified, it is removed only when its length is 1

t = torch.rand((1, 2, 3, 1))
print(t)
t_sq = torch.squeeze(t)         # removes both length-1 axes -> (2, 3)
t_0 = torch.squeeze(t, dim=0)   # axis 0 has length 1, so it is removed
t_1 = torch.squeeze(t, dim=1)   # axis 1 has length 2, so nothing happens
print(t.shape)
print(t_sq.shape)
print(t_0.shape)
print(t_1.shape)
'''
tensor([[[[0.7576],
          [0.2793],
          [0.4031]],

         [[0.7347],
          [0.0293],
          [0.7999]]]])
torch.Size([1, 2, 3, 1])
torch.Size([2, 3])
torch.Size([2, 3, 1])
torch.Size([1, 2, 3, 1])
'''
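For completeness (my addition): squeezing the trailing axis works the same way, since axis 3 also has length 1:

t_3 = torch.squeeze(t, dim=3)
print(t_3.shape)   # torch.Size([1, 2, 3])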

8) Tensor math operations in torch

torch.add(input, other, alpha=1) computes input + alpha × other in one step.

t_0 = torch.randn((3, 3))
t_1 = torch.ones_like(t_0)
t_add = torch.add(t_0, t_1, alpha=10)   # t_0 + 10 * t_1; the old positional form torch.add(t_0, 10, t_1) is deprecated
print("t_0:\n{}\nt_1:\n{}\nt_add_10:\n{}".format(t_0, t_1, t_add))

'''
t_0:
tensor([[ 0.6614,  0.2669,  0.0617],
        [ 0.6213, -0.4519, -0.1661],
        [-1.5228,  0.3817, -1.0276]])
t_1:
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]])
t_add_10:
tensor([[10.6614, 10.2669, 10.0617],
        [10.6213,  9.5481,  9.8339],
        [ 8.4772, 10.3817,  8.9724]])
'''
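A few sibling operations, sketched for completeness (these lines are my addition, not from the original post):

t_sub = torch.sub(t_0, t_1, alpha=10)   # t_0 - 10 * t_1
t_mul = torch.mul(t_0, t_1)             # element-wise product
t_div = torch.div(t_0, t_1)             # element-wise division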

9) Linear regression
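The original post stops here without code for this section. Below is a minimal sketch of the usual PyTorch linear-regression demo, fitting y = wx + b by plain gradient descent; the toy data (y = 2x + 5 plus noise), the learning rate and the stopping threshold are all my assumptions:

import torch

lr = 0.05                                # learning rate (assumed)

# Toy data: 20 points around the line y = 2x + 5 (assumed ground truth)
x = torch.rand(20, 1) * 10
y = 2 * x + 5 + torch.randn(20, 1)

# Trainable parameters with autograd enabled
w = torch.randn(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)

for iteration in range(1000):
    y_pred = torch.add(torch.mul(w, x), b)   # forward pass: w * x + b
    loss = (0.5 * (y - y_pred) ** 2).mean()  # mean squared error

    loss.backward()                          # back-propagate to get w.grad, b.grad

    with torch.no_grad():                    # update outside the autograd graph
        w -= lr * w.grad
        b -= lr * b.grad
    w.grad.zero_()                           # clear gradients for the next step
    b.grad.zero_()

    if loss.item() < 0.5:                    # assumed stopping threshold
        break

print("w: {:.3f}, b: {:.3f}, loss: {:.4f}".format(w.item(), b.item(), loss.item()))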


