python - 用scrapy爬取网站内容时,如何忽略某些内容为空的值;

查看:554
本文介绍了python - 用scrapy爬取网站内容时,如何忽略某些内容为空的值;的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!

问题描述

问 题

我爬取京东上所有手机信息时会碰到如下问题:
1、返回值过多,如下图片所示:

2、spider代码如下:

# -*- coding: utf-8 -*-

import scrapy
from scrapy.http import Request
from ueinfo.items import UeinfoItem

class MrueSpider(scrapy.Spider):
    """Crawl JD.com phone search results and scrape each product's spec table.

    Flow: parse() builds search-result URLs -> page() extracts product SKU
    ids and requests each detail page -> next() fills a UeinfoItem from the
    spec table.  (The original paste lost all class/method indentation; it
    is restored here.)
    """

    name = 'mrue'
    allowed_domains = ['jd.com']
    start_urls = ['http://jd.com/']

    # Maps item field name -> the <dt> label in JD's "Ptable" spec table.
    # Centralizing the labels removes 15 near-identical xpath statements.
    _SPEC_LABELS = {
        "xinghao": "型号",
        "nianfen": "上市年份",
        "yuefen": "上市月份",
        "caozuoxt": "操作系统",
        "cpupp": "CPU品牌",
        "cpuhs": "CPU核数",
        "cpuxh": "CPU型号",
        "shuangkalx": "双卡机类型",
        "mfnetwangl": "4G网络",
        "fnetwangl": "网络频率(4G)",
        "netwanglplus": "指纹识别",
        "volte": "高清语音通话(VOLTE)",
        "screenstyle": "主屏幕尺寸(英寸)",
        "fenbiel": "分辨率",
        "dianchirl": "电池容量(mAh)",
    }

    def parse(self, response):
        """Yield search-result page requests for the keyword, handled by page()."""
        key = "手机"
        for i in range(1, 2):
            # JD numbers result pages with odd values: logical page i -> 2*i-1.
            url = ("https://search.jd.com/Search?keyword=" + str(key)
                   + "&enc=utf-8&page=" + str((i * 2) - 1))
            yield Request(url=url, callback=self.page)

    def page(self, response):
        """Extract product SKU ids from one search page; request each detail page."""
        allid = response.xpath("//div[@class='p-focus']//a/@data-sku").extract()
        for thisid in allid:
            url1 = "https://item.jd.com/" + str(thisid) + ".html"
            yield Request(url=url1, callback=self.next)

    def next(self, response):
        """Scrape brand plus every spec-table field from a product detail page.

        Each field is the raw ``extract()`` list; a missing spec row yields an
        empty list, which the pipeline is responsible for filtering out.
        """
        item = UeinfoItem()
        item["pinpai"] = response.xpath(
            "//ul[@id='parameter-brand']/li/@title").extract()
        for field, label in self._SPEC_LABELS.items():
            item[field] = response.xpath(
                "//div[@class='Ptable']//div[@class='Ptable-item']//dl"
                "//dt[text()='" + label + "']/following::*[1]").extract()
        yield item

pipelines的代码如下:

# -*- coding: utf-8 -*-

import pymysql

class UeinfoPipeline(object):
    """Persist scraped phone-spec items into the MySQL table ``uems``.

    Items with any missing field (xpath ``extract()`` returned an empty
    list) are skipped instead of raising inside a silently-swallowed
    ``except``, which is how the original code "handled" empty values.
    """

    # Item keys in the order of the uems table's columns.
    _FIELDS = (
        "pinpai", "xinghao", "nianfen", "yuefen", "caozuoxt", "cpupp",
        "cpuhs", "cpuxh", "shuangkalx", "mfnetwangl", "fnetwangl",
        "netwanglplus", "volte", "screenstyle", "fenbiel", "dianchirl",
    )

    def __init__(self):
        # NOTE(review): credentials are hard-coded; move them to settings.py.
        self.conn = pymysql.connect(
            host="127.0.0.1", user="root", passwd="root", db="mysql")

    def process_item(self, item, spider):
        """Insert one item; skip it when any required field is empty."""
        try:
            # extract() returns a list, so a field absent from the page is []
            # and indexing [0] raises IndexError -> the item is skipped.
            values = [item[k][0] for k in self._FIELDS]
        except (KeyError, IndexError):
            return item
        # Parameterized query: fixes the SQL-injection/quoting bugs of the
        # original string-concatenated statement (and the coupp/cpupp typo).
        sql = (
            "insert into uems(pinpai,xinghao,nianfen,yuefen,caozuoxt,cpupp,"
            "cpuhs,cpuxh,shuangkalx,mwangluo,fwangluo,wangluoplus,volte,"
            "screenstyle,fenbian,dianchi) VALUES ("
            + ",".join(["%s"] * len(self._FIELDS)) + ")"
        )
        with self.conn.cursor() as cursor:
            cursor.execute(sql, values)
        # pymysql disables autocommit by default; the original conn.query()
        # without commit() could lose every inserted row.
        self.conn.commit()
        return item

    def close_spider(self, spider=None):
        """Close the DB connection.

        Scrapy calls this hook as ``close_spider(spider)``; the original
        ``def close_spider(self)`` raised TypeError at shutdown.  ``spider``
        defaults to None so any existing direct callers keep working.
        """
        self.conn.close()

解决方案

pipelines 中的

close_spider 方法定义错误了：Scrapy 调用该钩子时会传入 spider 参数

应为这样

def close_spider(self, spider)

至于忽略某些内容为空的值
用for 可能节省代码!

def process_item(self, item, spider):
    """Drop items containing any empty field; pass complete items through.

    Fixes two bugs in the original snippet: ``DropItem`` was never imported,
    and the function returned ``None`` for good items, which wipes them out
    for every later pipeline stage.  The comparison is also corrected:
    xpath ``extract()`` yields a list, so an empty field is ``[]``, which
    ``v == ''`` never matches — test falsiness instead.
    """
    from scrapy.exceptions import DropItem  # local: keeps the snippet self-contained
    for k, v in item.items():
        if not v:
            raise DropItem(repr(item))
    return item

这篇关于python - 用scrapy爬取网站内容时,如何忽略某些内容为空的值;的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!

查看全文
登录 关闭
扫码关注1秒登录
发送“验证码”获取 | 15天全站免登陆