torch之gpu训练

酥酥 发布于 2022-04-16 80 次阅读


GPU计算

				
!nvidia-smi # works for Linux/macOS users (shows driver version and attached GPUs)
				
			
				
import torch
from torch import nn

# Show which PyTorch build is installed (a +cuXXX suffix means CUDA support).
print(torch.__version__)
				
			
1.11.0+cu113

计算设备

				
torch.cuda.is_available() # whether CUDA is available
				
			
True
				
torch.cuda.device_count() # number of GPUs
				
			
1
				
torch.cuda.current_device() # index of the current device, starting from 0
				
			
0
				
torch.cuda.get_device_name(0) # return the name of GPU 0
				
			
'GeForce GTX 1050'

Tensor的GPU计算

				
# Tensors are created on the CPU by default.
x = torch.tensor([1, 2, 3])
x
				
			
tensor([1, 2, 3])
				
# .cuda(i) returns a copy of the tensor on GPU i (here GPU 0).
x = x.cuda(0)
x
				
			
tensor([1, 2, 3], device='cuda:0')
				
x.device # the device the tensor is stored on
				
			
device(type='cuda', index=0)
				
					device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

x = torch.tensor([1, 2, 3], device=device)
# or
x = torch.tensor([1, 2, 3]).to(device)
x
				
			
tensor([1, 2, 3], device='cuda:0')
				
# Operations on a tensor run on the device it is stored on,
# so the result y lives on the same device as x.
y = x**2
y
				
			
tensor([1, 4, 9], device='cuda:0')
				
					# z = y + x.cpu()
				
			

模型的GPU计算

				
# A freshly constructed module's parameters live on the CPU.
net = nn.Linear(3, 1)
list(net.parameters())[0].device
				
			
device(type='cpu')
				
# .cuda() moves all of the module's parameters to the GPU in place.
net.cuda()
list(net.parameters())[0].device
				
			
device(type='cuda', index=0)
				
# The input must be on the same device as the model's parameters.
x = torch.rand(2,3).cuda()
net(x)
				
			
tensor([[-0.5574],
        [-0.3792]], device='cuda:0', grad_fn=<ThAddmmBackward>)