python学习(二)

发布时间: 2019-05-22 22:11:50 · 编辑: auto · 阅读(1696)

    # Browser-style GET request: non-ASCII (e.g. Chinese) query parameters
    # must be percent-encoded before being appended to the URL.

    # FIX: the original did `import urllib` and then called
    # `urllib.parse.urlencode`, which only works because `urllib.request`
    # happens to import `urllib.parse` as a side effect. Import `parse`
    # explicitly so the dependency is guaranteed.
    from urllib import parse, request

    url = r"http://www.baidu.com/s?"
    # Searching Baidu for "浏览器useragent" in a browser yields an address bar like:
    # https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&rsv_idx=2&tn=02049043_62_pg&wd=%E6%B5%8F%E8%A7%88%E5%99%A8useragent&rsv_spt=1&oq=%25E6%2588%2591%25E8%25A6%2581%25E8%2587%25AA%25E5%25AD%25A6%25E7%25BD%2591&rsv_pq=807c712a00023178&rsv_t=2f3fUS%2Ba5xoKtOS%2FzBZBB8Rz1nKEJLx3Fa1MR%2B1DEw%2Fl0Z1uH95MMgBBId6Us6mLfF6J1pQ&rqlang=cn&rsv_enter=1&rsv_sug3=12&rsv_sug1=12&rsv_sug7=100&bs=%E6%88%91%E8%A6%81%E8%87%AA%E5%AD%A6%E7%BD%91
    # i.e. the browser percent-encodes the Chinese part to %E6%B5%8F%E8%A7%88%E5%99%A8useragent
    wd = {"wd":"浏览器useragent"}

    # Build the percent-encoded query string (e.g. "wd=%E6%B5%8F...useragent").
    wdd = parse.urlencode(wd)

    print(wdd)
    url = url + wdd
    req = request.Request(url)

    # urlopen returns bytes; decode() assumes UTF-8 (Baidu's default encoding).
    res = request.urlopen(req).read().decode()

    print(res)



    # Fetch a page through a randomly chosen HTTP proxy.
    # A crawler hitting a site from one IP too often may get blocked, so a
    # small proxy pool is used to make requests appear to come from
    # several different machines.
    from urllib import request
    import random

    # Free proxies found via Baidu on the XiCi (西祠) proxy list.
    proxyList = [
        {"http": "171.41.86.127:9999"},
        {"http": "171.41.86.90:9999"},
        {"http": "112.87.70.232:9999"},
    ]

    # Pick one proxy at random for this request.
    proxyOne = random.choice(proxyList)
    print(proxyOne)

    # Wrap the proxy in a handler, then build a custom opener that routes
    # all traffic through it.
    proxyHandler = request.ProxyHandler(proxyOne)
    opener = request.build_opener(proxyHandler)

    url = "http://www.baidu.com"
    req = request.Request(url)  # building a Request object first is optional;
    res = opener.open(req)      # opener.open(url) would work just as well

    print(res.read().decode())



关键字