Python Web Scraping: Basic Usage of the requests Library

Overview: basic usage of the requests library for web scraping in Python.

requests is built on top of the urllib3 library.

pip install requests

A site for testing HTTP requests: http://httpbin.org/


Required imports


import requests
from requests.models import Response  # optional; only needed for type hints

A simple test


def foo1():
    response = requests.get("http://www.baidu.com")
    print(type(response))
    print(response.status_code)
    print(type(response.text))
    print(len(response.text))
    print(response.cookies)
    """
    <class 'requests.models.Response'>
    200
    <class 'str'>
    2381
    <RequestsCookieJar[<Cookie BDORZ=27315 for .baidu.com/>]>
    """

GET with parameters


def foo2():
    response = requests.get("http://httpbin.org/get?name=Tom&age=20")
    print(response.text)
# Build the query parameters from a dict
def foo3():
    data = {
        "name": "Tom",
        "age": 20
    }
    response = requests.get("http://httpbin.org/get", params=data)
    print(response.text)

Parsing JSON


def foo4():
    import json
    response = requests.get("http://httpbin.org/get")
    print(type(response.text))
    print(type(response.json()))
    print(type(json.loads(response.text)))
    """
    <class 'str'>
    <class 'dict'>
    <class 'dict'>
    """

Saving binary content


def foo5():
    response = requests.get("https://github.com/favicon.ico")
    print(type(response.text))
    print(type(response.content))
    print(response.text)
    """
    <class 'str'>
    <class 'bytes'>
    """
    # Save the file; the with block closes it automatically
    with open("favicon.ico", "wb") as f:
        f.write(response.content)
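
For large downloads, response.content buffers the whole body in memory. A sketch of the streaming alternative with stream=True and iter_content; the chunk size here is an arbitrary choice:

def foo5b():
    with requests.get("https://github.com/favicon.ico", stream=True) as response:
        with open("favicon.ico", "wb") as f:
            # write the body piece by piece instead of all at once
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)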

Adding request headers


def foo6():
    response = requests.get("https://zhuanlan.zhihu.com/p/36085437")
    print(response.status_code)  # 500
    headers = {
        "User-Agent": "Mozilla/5.0"
    }
    response = requests.get("https://zhuanlan.zhihu.com/p/36085437",
                            headers=headers)
    print(response.status_code)  # 200

POST requests


def foo7():
    data = {
        "name": "Tom",
        "age": 20
    }
    headers = {
        "User-Agent": "Mozilla/5.0"
    }
    response = requests.post("http://httpbin.org/post",
                             data=data,
                             headers=headers)
    print(response.text)
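
requests can also serialize the body as JSON itself: pass json= instead of data= and the Content-Type header is set to application/json automatically. A small sketch (the function name is mine):

def foo7b():
    response = requests.post("http://httpbin.org/post",
                             json={"name": "Tom", "age": 20})
    # httpbin echoes the parsed JSON body under the "json" key
    print(response.json()["json"])  # {'name': 'Tom', 'age': 20}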

Response attributes


def foo8():
    response = requests.get("http://httpbin.org/get")
    print(type(response))
    print(type(response.headers))
    print(type(response.cookies))
    print(type(response.text))
    print(type(response.content))
    print(type(response.status_code))
    print(response.status_code)
    print(response.url)
    print(response.history)
    print(response.reason)
    print(response.encoding)
    print(response.apparent_encoding)
    print(response.request)
    """
    <class 'requests.models.Response'>
    <class 'requests.structures.CaseInsensitiveDict'>
    <class 'requests.cookies.RequestsCookieJar'>
    <class 'str'>
    <class 'bytes'>
    <class 'int'>
    200
    http://httpbin.org/get
    []
    OK
    None
    ascii
    <PreparedRequest [GET]>
    """

Checking status codes


def foo9():
    response = requests.get("http://httpbin.org/get")
    print(response.status_code == requests.codes.ok)
    print(response.status_code == 200)
    # True
    # True
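
Alternatively, raise_for_status() raises requests.exceptions.HTTPError for any 4xx/5xx response. A minimal sketch; the function name and the /status/404 endpoint choice are mine:

def foo9b():
    response = requests.get("http://httpbin.org/status/404")
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as e:
        print(e)  # 404 Client Error: NOT FOUND for url: ...
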
"""
# Informational.
100: ('continue',),
101: ('switching_protocols',),
102: ('processing',),
103: ('checkpoint',),
122: ('uri_too_long', 'request_uri_too_long'),
200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
201: ('created',),
202: ('accepted',),
203: ('non_authoritative_info', 'non_authoritative_information'),
204: ('no_content',),
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('already_reported',),
226: ('im_used',),
# Redirection.
300: ('multiple_choices',),
301: ('moved_permanently', 'moved', '\\o-'),
302: ('found',),
303: ('see_other', 'other'),
304: ('not_modified',),
305: ('use_proxy',),
306: ('switch_proxy',),
307: ('temporary_redirect', 'temporary_moved', 'temporary'),
308: ('permanent_redirect', 'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0
# Client Error.
400: ('bad_request', 'bad'),
401: ('unauthorized',),
402: ('payment_required', 'payment'),
403: ('forbidden',),
404: ('not_found', '-o-'),
405: ('method_not_allowed', 'not_allowed'),
406: ('not_acceptable',),
407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
408: ('request_timeout', 'timeout'),
409: ('conflict',),
410: ('gone',),
411: ('length_required',),
412: ('precondition_failed', 'precondition'),
413: ('request_entity_too_large',),
414: ('request_uri_too_large',),
415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
417: ('expectation_failed',),
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
421: ('misdirected_request',),
422: ('unprocessable_entity', 'unprocessable'),
423: ('locked',),
424: ('failed_dependency', 'dependency'),
425: ('unordered_collection', 'unordered'),
426: ('upgrade_required', 'upgrade'),
428: ('precondition_required', 'precondition'),
429: ('too_many_requests', 'too_many'),
431: ('header_fields_too_large', 'fields_too_large'),
444: ('no_response', 'none'),
449: ('retry_with', 'retry'),
450: ('blocked_by_windows_parental_controls', 'parental_controls'),
451: ('unavailable_for_legal_reasons', 'legal_reasons'),
499: ('client_closed_request',),
# Server Error.
500: ('internal_server_error', 'server_error', '/o\\', '✗'),
501: ('not_implemented',),
502: ('bad_gateway',),
503: ('service_unavailable', 'unavailable'),
504: ('gateway_timeout',),
505: ('http_version_not_supported', 'http_version'),
506: ('variant_also_negotiates',),
507: ('insufficient_storage',),
509: ('bandwidth_limit_exceeded', 'bandwidth'),
510: ('not_extended',),
511: ('network_authentication_required', 'network_auth', 'network_authentication'),
"""

File upload


def foo10():
    # open inside a with block so the file handle is closed afterwards
    with open("cookie.txt", "rb") as f:
        files = {"file": f}
        response = requests.post("http://httpbin.org/post", files=files)
    print(response.status_code)
    print(response.text)
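
To set the uploaded filename and content type explicitly, the dict value can be a (filename, fileobj, content_type) tuple. A sketch; the text/plain type is an assumption about cookie.txt:

def foo10b():
    with open("cookie.txt", "rb") as f:
        # filename and MIME type are illustrative choices
        files = {"file": ("cookie.txt", f, "text/plain")}
        response = requests.post("http://httpbin.org/post", files=files)
    print(response.status_code)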

Getting cookies


def foo11():
    response = requests.get("http://www.baidu.com")
    print(response.cookies)
    for key, value in response.cookies.items():
        print("{key} = {value}".format(key=key, value=value))
    """
    <RequestsCookieJar[<Cookie BDORZ=27315 for .baidu.com/>]>
    BDORZ = 27315
    """
# Using cookies to persist a session (simulating login)
def foo12():
    # The second GET cannot see the cookie; the two requests behave like
    # two separate browsers
    response = requests.get("http://httpbin.org/cookies/set/number/123456789")
    print(response.text)
    # "number": "123456789"
    response = requests.get("http://httpbin.org/cookies")
    print(response.text)
    # "cookies": {}
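
Cookies can also be attached to a single request with the cookies= parameter, without any session. A minimal sketch (the function name is mine):

def foo12b():
    cookies = {"number": "123456789"}
    response = requests.get("http://httpbin.org/cookies", cookies=cookies)
    print(response.text)
    # "number": "123456789"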

Sessions

A Session object behaves like a single browser: it persists cookies across requests.


# The two requests behave like one browser session, so the cookie persists
def foo13():
    session = requests.Session()
    response = session.get("http://httpbin.org/cookies/set/number/123456789")
    print(response.text)
    # "number": "123456789"
    response = session.get("http://httpbin.org/cookies")
    print(response.text)
    # "number": "123456789"

Certificate verification

def foo14():
    response = requests.get("https://www.12306.cn/")
    # raises requests.exceptions.SSLError: CERTIFICATE_VERIFY_FAILED
    print(response.status_code)
# Disable certificate verification
def foo15():
    from requests.packages import urllib3
    urllib3.disable_warnings()  # suppress the warning
    response = requests.get("https://www.12306.cn/", verify=False)
    # InsecureRequestWarning
    print(response.status_code) # 200
# Specify a client certificate
def foo16():
    # cert= takes a path to the certificate file, or a (cert, key) tuple
    response = requests.get("https://www.12306.cn/", cert="path")
    print(response.status_code)  # 200
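
verify= also accepts the path of a trusted CA bundle, which is the usual way to handle self-signed certificates without disabling verification. A sketch with a placeholder path:

def foo16b():
    response = requests.get("https://www.12306.cn/",
                            verify="path/to/ca-bundle.crt")  # placeholder path
    print(response.status_code)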

Using proxies

def foo17():
    proxies = {
        "http": "http://127.0.0.1:8000",
        "https": "https://127.0.0.1:8000"
    }
    response = requests.get("https://www.12306.cn/", proxies=proxies)
    print(response.status_code)  # 200
# A proxy that requires authentication
def foo18():
    proxies = {
        "http": "http://user:password@127.0.0.1:8000"
    }
    response = requests.get("https://www.12306.cn/", proxies=proxies)
    print(response.status_code)  # 200
# Using a SOCKS proxy
# Install support first: pip install requests[socks]
def foo19():
    proxies = {
        "http": "socks5://127.0.0.1:8000",
        "https": "socks5://127.0.0.1:8000",
    }
    response = requests.get("https://www.12306.cn/", proxies=proxies)
    print(response.status_code)  # 200

Timeout settings

def foo20():
    from requests.exceptions import ConnectTimeout
    try:
        response = requests.get("http://www.goole.com/", timeout=0.1)
        print(response.status_code)
    except ConnectTimeout:
        print("Connection timed out")
    # Note: a slow read raises requests.exceptions.ReadTimeout instead,
    # which this handler does not catch
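
timeout= also accepts a (connect, read) tuple so the two limits can be set separately. A minimal sketch; the limit values are arbitrary choices:

def foo20b():
    from requests.exceptions import ConnectTimeout, ReadTimeout
    try:
        # 3.05 s to establish the connection, 10 s to read the response
        response = requests.get("http://www.google.com/", timeout=(3.05, 10))
        print(response.status_code)
    except ConnectTimeout:
        print("Connect timed out")
    except ReadTimeout:
        print("Read timed out")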

Authentication

def foo21():
    from requests.auth import HTTPBasicAuth
    response = requests.get("http://www.goole.com/",
                        auth=HTTPBasicAuth("user", "password"))
    print(response.status_code)
# Equivalently, pass a (user, password) tuple
def foo22():
    response = requests.get("http://www.goole.com/",
                        auth=("user", "password"))
    print(response.status_code)
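
For servers that use HTTP Digest authentication instead of Basic, requests provides HTTPDigestAuth. A minimal sketch against httpbin's digest endpoint; the credentials are illustrative:

def foo22b():
    from requests.auth import HTTPDigestAuth
    response = requests.get("http://httpbin.org/digest-auth/auth/user/password",
                            auth=HTTPDigestAuth("user", "password"))
    print(response.status_code)  # 200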

Exception handling

# Principle: catch the most specific subclass first, then its parent
# classes, and the base exception class last
# Reference: http://cn.python-requests.org/zh_CN/latest/_modules/requests/exceptions.html
def foo23():
    from requests.exceptions import ConnectTimeout, Timeout, RequestException
    try:
        response = requests.get("http://www.google.com/", timeout=0.1)
        print(response.status_code)
    except ConnectTimeout:  # subclass exception
        print("ConnectTimeout")
    except Timeout:  # parent class exception
        print("Timeout")
    except RequestException:  # base class exception
        print("RequestException")