Pytorch RuntimeError: Expected tensor for argument #1 'indices' to have scalar type Long; but got CUDAType instead


Problem description

I am trying to re-run a GitHub project on my computer that does recommendation with embeddings. The goal is to first embed the users and items present in the MovieLens dataset, and then use the inner product of the embeddings to predict a rating. When I finished integrating all the components, I got an error during training.

Code:

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

from lightfm.datasets import fetch_movielens

movielens = fetch_movielens()
ratings_train, ratings_test = movielens['train'], movielens['test']

def _binarize(dataset):

    dataset = dataset.copy()

    dataset.data = (dataset.data >= 0.0).astype(np.float32)
    dataset = dataset.tocsr()
    dataset.eliminate_zeros()

    return dataset.tocoo()

train, test = _binarize(movielens['train']), _binarize(movielens['test'])

class ScaledEmbedding(nn.Embedding):
    """ Change the scale from normal to [0,1/embedding_dim] """
    def reset_parameters(self):
        self.weight.data.normal_(0, 1.0 / self.embedding_dim)
        if self.padding_idx is not None:
            self.weight.data[self.padding_idx].fill_(0)


class ZeroEmbedding(nn.Embedding):
    """ Embedding whose weights (the bias terms) are initialized to zero """
    def reset_parameters(self):
        self.weight.data.zero_()
        if self.padding_idx is not None:
            self.weight.data[self.padding_idx].fill_(0)

class BilinearNet(nn.Module):

    def __init__(self, num_users, num_items, embedding_dim, sparse=False):
        super().__init__()

        self.embedding_dim = embedding_dim

        self.user_embeddings = ScaledEmbedding(num_users, embedding_dim,
                                               sparse=sparse)
        self.item_embeddings = ScaledEmbedding(num_items, embedding_dim,
                                               sparse=sparse)
        self.user_biases = ZeroEmbedding(num_users, 1, sparse=sparse)
        self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse)

    def forward(self, user_ids, item_ids):

        user_embedding = self.user_embeddings(user_ids)
        item_embedding = self.item_embeddings(item_ids)

        user_embedding = user_embedding.view(-1, self.embedding_dim)
        item_embedding = item_embedding.view(-1, self.embedding_dim)

        user_bias = self.user_biases(user_ids).view(-1, 1)
        item_bias = self.item_biases(item_ids).view(-1, 1)

        dot = (user_embedding * item_embedding).sum(1)

        return dot + user_bias + item_bias

def pointwise_loss(net, users, items, ratings, num_items):
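    # Sample random item ids as implicit negatives; minimizing the loss pushes
    # scores of observed (user, item) pairs up and scores of sampled pairs down.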

    negatives = Variable(
        torch.from_numpy(np.random.randint(0, num_items, len(users))).cuda()
    )

    positives_loss = (1.0 - torch.sigmoid(net(users, items)))
    negatives_loss = torch.sigmoid(net(users, negatives))

    return torch.cat([positives_loss, negatives_loss]).mean()

embedding_dim = 128
minibatch_size = 1024
n_iter = 10
l2=0.0
sparse = True

num_users, num_items = train.shape
net = BilinearNet(num_users,
                  num_items,
                  embedding_dim,
                  sparse=sparse).cuda()

optimizer = optim.Adagrad(net.parameters(), weight_decay=l2)

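# NOTE: shuffle() and _minibatch() are helpers from the original project and
# are not shown in the question; from their usage, shuffle() presumably returns
# aligned user/item/rating arrays and _minibatch() yields fixed-size chunks.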
for epoch_num in range(n_iter):

    users, items, ratings = shuffle(train)

    user_ids_tensor = torch.from_numpy(users).cuda()
    item_ids_tensor = torch.from_numpy(items).cuda()
    ratings_tensor = torch.from_numpy(ratings).cuda()

    epoch_loss = 0.0

    for (batch_user,
         batch_item,
         batch_ratings) in zip(_minibatch(user_ids_tensor,
                                          minibatch_size),
                               _minibatch(item_ids_tensor,
                                          minibatch_size),
                               _minibatch(ratings_tensor,
                                          minibatch_size)):

        user_var = Variable(batch_user)
        item_var = Variable(batch_item)
        ratings_var = Variable(batch_ratings)
        optimizer.zero_grad()
        loss = pointwise_loss(net, user_var, item_var, ratings_var, num_items)
        epoch_loss += loss.data[0]
        loss.backward()
        optimizer.step()
        print('Epoch {}: loss {}'.format(epoch_num, epoch_loss))

Error:

RuntimeError                              Traceback (most recent call last)
<ipython-input-87-dcd04440363f> in <module>()
             22         ratings_var = Variable(batch_ratings)
             23         optimizer.zero_grad()
        ---> 24         loss = pointwise_loss(net,user_var, item_var, ratings_var, num_items)
             25         epoch_loss += loss.data[0]
             26         loss.backward()

        <ipython-input-86-679e10f637a5> in pointwise_loss(net, users, items, ratings, num_items)
              8 
              9     positives_loss = (1.0 - torch.sigmoid(net(users, items)))
        ---> 10     negatives_loss = torch.sigmoid(net(users, negatives))
             11 
             12     return torch.cat([positives_loss, negatives_loss]).mean()

        ~\Anaconda3\lib\site-packages\torch\nn\modules\module.py in
        __call__(self, *input, **kwargs)
            491             result = self._slow_forward(*input, **kwargs)
            492         else:
        --> 493             result = self.forward(*input, **kwargs)
            494         for hook in self._forward_hooks.values():
            495             hook_result = hook(self, input, result)

        <ipython-input-58-3946abf81d81> in forward(self, user_ids, item_ids)
             16 
             17         user_embedding = self.user_embeddings(user_ids)
        ---> 18         item_embedding = self.item_embeddings(item_ids)
             19 
             20         user_embedding = user_embedding.view(-1, self.embedding_dim)

        ~\Anaconda3\lib\site-packages\torch\nn\modules\module.py in
        __call__(self, *input, **kwargs)
            491             result = self._slow_forward(*input, **kwargs)
            492         else:
        --> 493             result = self.forward(*input, **kwargs)
            494         for hook in self._forward_hooks.values():
            495             hook_result = hook(self, input, result)

        ~\Anaconda3\lib\site-packages\torch\nn\modules\sparse.py in forward(self, input)
            115         return F.embedding(
            116             input, self.weight, self.padding_idx, self.max_norm,
        --> 117             self.norm_type, self.scale_grad_by_freq, self.sparse)
            118 
            119     def extra_repr(self):

        ~\Anaconda3\lib\site-packages\torch\nn\functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
           1504         # remove once script supports set_grad_enabled
           1505         _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
        -> 1506     return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
           1507
           1508

        RuntimeError: Expected tensor for argument #1 'indices' to have scalar type Long; but got CUDAType instead (while checking arguments for embedding)

Can anyone help me?

Solution

I would suggest you check the input type. I had the same issue, which was solved by converting the input type from int32 to int64 (running on Windows 10). For example:

x = torch.tensor(train).to(torch.int64)
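
Applied to the code in the question, the cast belongs wherever index arrays become tensors: nn.Embedding only accepts int64 (Long) indices, and NumPy integer arrays default to int32 on Windows. A minimal sketch of that change, using the variable names from the question's training loop:

user_ids_tensor = torch.from_numpy(users).long().cuda()
item_ids_tensor = torch.from_numpy(items).long().cuda()

# pointwise_loss samples negatives with np.random.randint, which also
# returns int32 on Windows, so cast there as well:
negatives = Variable(
    torch.from_numpy(np.random.randint(0, num_items, len(users))).long().cuda()
)

You can confirm the fix by checking user_ids_tensor.dtype, which should now be torch.int64.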
