from __future__ import print_function

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

torch.manual_seed(1)


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.embedding = nn.Embedding(1000, 100)
        # Bidirectional LSTM: hidden size 30 per direction -> 60 features out
        self.lstm = nn.LSTM(100, 30, num_layers=1, bidirectional=True)
        self.out_layer = nn.Linear(60, 2)

    def forward(self, x):
        x = x.t()  # (batch, seq_len) -> (seq_len, batch)
        batch_size = x.size(1)
        emb = self.embedding(x)  # (seq_len, batch, 100)
        print('Embedding size: ', emb.size())
        hidden = self.init_hidden(batch_size)
        lstm_out, hidden = self.lstm(emb.view(len(emb), batch_size, -1), hidden)
        print('LSTM out: ', lstm_out[-1].size())
        out = self.out_layer(lstm_out[-1])
        # NLLLoss expects log-probabilities, so apply log_softmax here
        return F.log_softmax(out, dim=1)

    def init_hidden(self, batch_size):
        # 2 = num_layers * num_directions for a 1-layer bidirectional LSTM
        return (create_variable(torch.zeros(2, batch_size, 30)),
                create_variable(torch.zeros(2, batch_size, 30)))
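# Shape walkthrough for the layers above (a quick sketch of the sizes the
# print() calls should report): a (3, 5) LongTensor batch becomes (5, 3)
# after x.t(), (5, 3, 100) after the embedding, (5, 3, 60) out of the
# bidirectional LSTM, and lstm_out[-1] -> out_layer yields a (3, 2)
# matrix of log-probabilities.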

def create_variable(tensor):
    # Call .cuda() before wrapping with Variable
    if torch.cuda.is_available():
        return Variable(tensor.cuda())
    else:
        return Variable(tensor)

# Toy batch: 3 sequences of length 5, with one 0/1 label per sequence
x = [[2, 30, 40, 1, 0], [20, 3, 5, 10, 3], [5, 2, 4, 80, 1]]
x = create_variable(torch.LongTensor(x))
y = create_variable(torch.LongTensor([0, 1, 1]))

model = Net()
loss_function = nn.NLLLoss()  # expects log-probabilities (see log_softmax in forward)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)

if torch.cuda.is_available():
    model.cuda()
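# model.cuda() above is cuda_test.py line 52 in the traceback below; the
# move to GPU is the step that triggers the cuDNN AssertionError.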

for epoch in range(10):
    output = model(x)
    print('output size: ', output.size())
    print('y size: ', y.size())
    loss = loss_function(output, y.view(-1))
    print('Loss: ', loss.data[0])  # loss.data[0] is the pre-0.4 way to read a scalar loss

    model.zero_grad()
    loss.backward()
    optimizer.step()

Traceback (most recent call last):
  File "cuda_test.py", line 52, in <module>
    model.cuda()
  File "/home1/dhanachandra/anaconda3/envs/my_env27/lib/python2.7/site-packages/torch/nn/modules/module.py", line 216, in cuda
    return self._apply(lambda t: t.cuda(device))
  File "/home1/dhanachandra/anaconda3/envs/my_env27/lib/python2.7/site-packages/torch/nn/modules/module.py", line 146, in _apply
    module._apply(fn)
  File "/home1/dhanachandra/anaconda3/envs/my_env27/lib/python2.7/site-packages/torch/nn/modules/module.py", line 146, in _apply
    module._apply(fn)
  File "/home1/dhanachandra/anaconda3/envs/my_env27/lib/python2.7/site-packages/torch/nn/modules/rnn.py", line 123, in _apply
    self.flatten_parameters()
  File "/home1/dhanachandra/anaconda3/envs/my_env27/lib/python2.7/site-packages/torch/nn/modules/rnn.py", line 111, in flatten_parameters
    params = rnn.get_parameters(fn, handle, fn.weight_buf)
  File "/home1/dhanachandra/anaconda3/envs/my_env27/lib/python2.7/site-packages/torch/backends/cudnn/rnn.py", line 165, in get_parameters
    assert filter_dim_a.prod() == filter_dim_a[0]
AssertionError

The code runs on the CPU without any error, but fails on the GPU.
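Note that the traceback never reaches this script's forward pass: it fails inside nn.LSTM's flatten_parameters() while model.cuda() is moving the weights to the GPU. A minimal sketch to isolate that step (assuming the failure lies in the PyTorch/cuDNN build rather than in this model, a bare bidirectional LSTM with the same configuration should raise the same AssertionError):

import torch
import torch.nn as nn

# Isolation test (assumption): same LSTM configuration as the model above,
# with no embedding or linear layers involved.
rnn = nn.LSTM(100, 30, num_layers=1, bidirectional=True)
if torch.cuda.is_available():
    rnn.cuda()  # .cuda() triggers flatten_parameters() and the cuDNN get_parameters call

If this two-liner also asserts, the problem is the installed torch/cuDNN combination (the assert lives in torch/backends/cudnn/rnn.py), not the training code.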