Tags: insert, imp, suggestion, data size, reshape, code, src, specify, port
import torch
a = torch.rand(2, 3, 2, 3)
a
# Output (values are random and differ on every run):
tensor([[[[0.4850, 0.0073, 0.8941],
[0.0208, 0.4396, 0.7841]],
[[0.0553, 0.3554, 0.0726],
[0.9669, 0.3918, 0.9356]],
[[0.6169, 0.2080, 0.1028],
[0.5816, 0.3509, 0.6983]]],
[[[0.7545, 0.8693, 0.4751],
[0.2206, 0.3384, 0.2877]],
[[0.9521, 0.6172, 0.5058],
[0.6835, 0.0624, 0.6261]],
[[0.7752, 0.3820, 0.5585],
[0.1547, 0.1420, 0.6051]]]])
# view flattens the data in order and reshapes it to the given dimensions,
# so the element order and the total number of elements stay unchanged.
a.view(4, 9)
# Output:
tensor([[0.4850, 0.0073, 0.8941, 0.0208, 0.4396, 0.7841, 0.0553, 0.3554, 0.0726],
[0.9669, 0.3918, 0.9356, 0.6169, 0.2080, 0.1028, 0.5816, 0.3509, 0.6983],
[0.7545, 0.8693, 0.4751, 0.2206, 0.3384, 0.2877, 0.9521, 0.6172, 0.5058],
[0.6835, 0.0624, 0.6261, 0.7752, 0.3820, 0.5585, 0.1547, 0.1420, 0.6051]])
# reshape gives the same result as view here
a.reshape(4, 9)
# Output:
tensor([[0.4850, 0.0073, 0.8941, 0.0208, 0.4396, 0.7841, 0.0553, 0.3554, 0.0726],
[0.9669, 0.3918, 0.9356, 0.6169, 0.2080, 0.1028, 0.5816, 0.3509, 0.6983],
[0.7545, 0.8693, 0.4751, 0.2206, 0.3384, 0.2877, 0.9521, 0.6172, 0.5058],
[0.6835, 0.0624, 0.6261, 0.7752, 0.3820, 0.5585, 0.1547, 0.1420, 0.6051]])
# After reshaping, you can reshape back if you remember the original shape
a.view(4, 9).view(2, 3, 2, 3)
# Output:
tensor([[[[0.4850, 0.0073, 0.8941],
[0.0208, 0.4396, 0.7841]],
[[0.0553, 0.3554, 0.0726],
[0.9669, 0.3918, 0.9356]],
[[0.6169, 0.2080, 0.1028],
[0.5816, 0.3509, 0.6983]]],
[[[0.7545, 0.8693, 0.4751],
[0.2206, 0.3384, 0.2877]],
[[0.9521, 0.6172, 0.5058],
[0.6835, 0.0624, 0.6261]],
[[0.7752, 0.3820, 0.5585],
[0.1547, 0.1420, 0.6051]]]])
a = torch.rand(2, 3)
a
# Output:
tensor([[0.3074, 0.2152, 0.0082],
[0.2831, 0.9236, 0.2705]])
# Insert one new dimension (of size 1) at position 0.
a.unsqueeze(0)
# Negative indices are supported; this is equivalent to a.unsqueeze(-3)
# Output:
tensor([[[0.3074, 0.2152, 0.0082],
[0.2831, 0.9236, 0.2705]]])
a.unsqueeze(1)
# Equivalent to a.unsqueeze(-2)
# Output:
tensor([[[0.3074, 0.2152, 0.0082]],
[[0.2831, 0.9236, 0.2705]]])
a.unsqueeze(2)
# Equivalent to a.unsqueeze(-1)
# Output:
tensor([[[0.3074],
[0.2152],
[0.0082]],
[[0.2831],
[0.9236],
[0.2705]]])
a.unsqueeze(0).unsqueeze(-1) # add one dimension at position 0 and one at the end; compared with the previous result there is one extra pair of [] brackets on the outside
# Output:
tensor([[[[0.3074],
[0.2152],
[0.0082]],
[[0.2831],
[0.9236],
[0.2705]]]])
a = torch.rand(2, 1, 3, 1, 2, 1)
a
# Output:
tensor([[[[[[0.4492],
[0.6223]]],
[[[0.8522],
[0.4971]]],
[[[0.2585],
[0.6034]]]]],
[[[[[0.1904],
[0.5564]]],
[[[0.3120],
[0.2698]]],
[[[0.6620],
[0.5963]]]]]])
# squeeze() removes every dimension of size 1: (2, 1, 3, 1, 2, 1) -> (2, 3, 2).
# NOTE: squeeze() is NOT in-place -- it returns a new tensor and leaves `a`
# unchanged, so the result must be bound to a name to see the squeezed shape.
b = a.squeeze()
b
# Output:
tensor([[[0.4492, 0.6223],
[0.8522, 0.4971],
[0.2585, 0.6034]],
[[0.1904, 0.5564],
[0.3120, 0.2698],
[0.6620, 0.5963]]])
# Removing the size-1 dimensions one at a time by index has the same effect:
# squeeze(1): (2,1,3,1,2,1) -> (2,3,1,2,1); squeeze(2): -> (2,3,2,1); squeeze(3): -> (2,3,2)
c = a.squeeze(1).squeeze(2).squeeze(3)
c
# Output:
tensor([[[0.4492, 0.6223],
[0.8522, 0.4971],
[0.2585, 0.6034]],
[[0.1904, 0.5564],
[0.3120, 0.2698],
[0.6620, 0.5963]]])
a = torch.rand(2, 3, 1)
a
# Output:
tensor([[[0.1162],
[0.6026],
[0.5674]],
[[0.7272],
[0.4351],
[0.1708]]])
# expand broadcasts the data; only dimensions of size 1 can be expanded
# (-1 keeps the existing size of that dimension).
a.expand(2, -1, 3)
# Output:
tensor([[[0.1162, 0.1162, 0.1162],
[0.6026, 0.6026, 0.6026],
[0.5674, 0.5674, 0.5674]],
[[0.7272, 0.7272, 0.7272],
[0.4351, 0.4351, 0.4351],
[0.1708, 0.1708, 0.1708]]])
a
# expand is not in-place: `a` still has its original shape (2, 3, 1)
# Output:
tensor([[[0.1162],
[0.6026],
[0.5674]],
[[0.7272],
[0.4351],
[0.1708]]])
# repeat tiles the data the given number of times along each dimension.
# NOTE(review): the values printed below do not match `a` above -- the original
# transcript was apparently regenerated with a new random tensor here.
a.repeat(1, 1, 3)
# Output:
tensor([[[0.1655, 0.1655, 0.1655],
[0.2952, 0.2952, 0.2952],
[0.8438, 0.8438, 0.8438]],
[[0.0215, 0.0215, 0.0215],
[0.0677, 0.0677, 0.0677],
[0.6002, 0.6002, 0.6002]]])
a = torch.rand(5, 2)
a
# Output:
tensor([[0.9841, 0.2180],
[0.5082, 0.8553],
[0.5250, 0.7228],
[0.9064, 0.0074],
[0.2752, 0.3939]])
# t() transposes the 2-D tensor: (5, 2) -> (2, 5)
a.t()
# Output:
tensor([[0.9841, 0.5082, 0.5250, 0.9064, 0.2752],
[0.2180, 0.8553, 0.7228, 0.0074, 0.3939]])
a = torch.rand(2, 3, 4)
a
# Output:
tensor([[[0.1696, 0.6733, 0.7269, 0.1066],
[0.5433, 0.2820, 0.0214, 0.9471],
[0.6922, 0.8220, 0.8422, 0.8682]],
[[0.6121, 0.8133, 0.6502, 0.4529],
[0.9810, 0.9233, 0.5279, 0.2193],
[0.1775, 0.8487, 0.4938, 0.3994]]])
# transpose swaps the two given dimensions: (2, 3, 4) -> (4, 3, 2)
a.transpose(0, 2)
# Output:
tensor([[[0.1696, 0.6121],
[0.5433, 0.9810],
[0.6922, 0.1775]],
[[0.6733, 0.8133],
[0.2820, 0.9233],
[0.8220, 0.8487]],
[[0.7269, 0.6502],
[0.0214, 0.5279],
[0.8422, 0.4938]],
[[0.1066, 0.4529],
[0.9471, 0.2193],
[0.8682, 0.3994]]])
# transpose returns a non-contiguous view, so .contiguous() is needed
# before view() can reshape it
a.transpose(0, 2).contiguous().view(4, 6)
# Output:
tensor([[0.1696, 0.6121, 0.5433, 0.9810, 0.6922, 0.1775],
[0.6733, 0.8133, 0.2820, 0.9233, 0.8220, 0.8487],
[0.7269, 0.6502, 0.0214, 0.5279, 0.8422, 0.4938],
[0.1066, 0.4529, 0.9471, 0.2193, 0.8682, 0.3994]])
Unlike transpose, which swaps exactly two dimensions, permute reorders all dimensions at once:
a = torch.rand(2, 3, 4)
a
# Output:
tensor([[[0.7855, 0.6239, 0.3785, 0.8138],
[0.9595, 0.5210, 0.3816, 0.1612],
[0.0152, 0.9714, 0.4245, 0.4754]],
[[0.1475, 0.4961, 0.0812, 0.3769],
[0.4279, 0.0595, 0.0717, 0.9871],
[0.9480, 0.3525, 0.3076, 0.0367]]])
# permute reorders all dimensions in one call: (2, 3, 4) -> (3, 4, 2)
a.permute(1, 2, 0)
# Output:
tensor([[[0.7855, 0.1475],
[0.6239, 0.4961],
[0.3785, 0.0812],
[0.8138, 0.3769]],
[[0.9595, 0.4279],
[0.5210, 0.0595],
[0.3816, 0.0717],
[0.1612, 0.9871]],
[[0.0152, 0.9480],
[0.9714, 0.3525],
[0.4245, 0.3076],
[0.4754, 0.0367]]])
# The same effect as this permute can be achieved with 2 transposes
a.transpose(0, 1).transpose(1, 2)
# Output:
tensor([[[0.7855, 0.1475],
[0.6239, 0.4961],
[0.3785, 0.0812],
[0.8138, 0.3769]],
[[0.9595, 0.4279],
[0.5210, 0.0595],
[0.3816, 0.0717],
[0.1612, 0.9871]],
[[0.0152, 0.9480],
[0.9714, 0.3525],
[0.4245, 0.3076],
[0.4754, 0.0367]]])
a = torch.rand(3, 4, 2)
a
# Output:
tensor([[[0.7220, 0.7834],
[0.4305, 0.6128],
[0.6032, 0.5751],
[0.2304, 0.3003]],
[[0.1044, 0.1071],
[0.1373, 0.4874],
[0.3963, 0.5231],
[0.1851, 0.8962]],
[[0.0677, 0.0587],
[0.7268, 0.8807],
[0.5445, 0.2110],
[0.8755, 0.8577]]])
b = torch.rand(1, 4, 2) # b's first dimension is 1, a's is 3
b
# Output:
tensor([[[0.6676, 0.5702],
[0.3334, 0.8553],
[0.5392, 0.0754],
[0.9488, 0.7814]]])
# When adding, b is automatically broadcast along dim 0 (1 -> 3) and then
# added to a elementwise to produce the result
a + b
# Output:
tensor([[[1.0925, 1.1274],
[0.7881, 1.6091],
[0.6568, 0.8419],
[1.8460, 1.7360]],
[[0.9783, 0.9697],
[1.0127, 1.3521],
[0.7147, 0.3189],
[1.3435, 1.3845]],
[[0.8699, 1.0997],
[0.5712, 1.8381],
[0.6607, 1.0689],
[0.9599, 1.7686]]])
Tags: insert, imp, suggestion, data size, reshape, code, src, specify, port
Original source: https://www.cnblogs.com/jaysonteng/p/13034758.html