NumPy + AutoGrad: creating PyTorch tensors from NumPy arrays
# Build a NumPy array and convert it to a PyTorch tensor.
# torch.FloatTensor copies the integer ndarray into a float32 tensor.
n_array = np.arange(10).reshape(2, 5)
t_array = torch.FloatTensor(n_array)
print(t_array)
print(t_array.ndim, t_array.shape)

# Every tensor lives on a device; a freshly created tensor is on the CPU.
print(t_array.device)  # device(type='cpu')

# Move the tensor to the GPU only when CUDA is actually available,
# so the notes still run on CPU-only machines.
if torch.cuda.is_available():
    t_array_cuda = t_array.to("cuda")
    print(t_array_cuda.device)  # device(type='cuda')
Note: PyTorch tensor operations are mostly the same as NumPy operations.