requests is a practical Python HTTP client library that is frequently used when writing crawlers and testing server response data. Requests is a third-party Python library designed specifically for sending HTTP requests.
To install requests:
pip install requests
1. Request without parameters
r = requests.get('http://www.baidu.com')
2. Request with parameters
payload = {'key1': 'value1', 'key2': 'value2', 'key3': None}
r = requests.get('http://www.baidu.com', params=payload)
Example:
import requests

class UseRequest():
    # First way to pass GET parameters: put the query string in the URL
    def GetMethod(self):
        r = requests.get("http://v.juhe.cn/toutiao/index?type=guonei&key=4b72107"
                         "de3a197b3bafd9adacf685790")
        print(r.text)

    # Second way to pass GET parameters: use the params argument
    def GetMethod1(self):
        params = {"type": "guonei", "key": "4b72107de3a197b3bafd9adacf685790"}
        r = requests.get("http://v.juhe.cn/toutiao/index", params=params)
        print(r)

u = UseRequest()
u.GetMethod()
u.GetMethod1()
A POST request works like a form submission in Python:
payload = {'key1': 'value1', 'key2': 'value2'}
r = requests.post("http://httpbin.org/post", data=payload)
Example:
import requests

class UseRequest():
    # Passing parameters with POST
    def PostMethod(self):
        params = {"type": "guonei", "key": "4b72107de3a197b3bafd9adacf685790"}
        r = requests.post("http://v.juhe.cn/toutiao/index", data=params)
        print(r.status_code)
        # return r.status_code

u = UseRequest()
u.PostMethod()
print(r.status_code)  # response status code
print(r.headers)      # response headers
print(r.cookies)      # response cookies
print(r.text)         # response body as text
print(r.encoding)     # current encoding
print(r.content)      # response body as bytes (binary)
The most common use is checking the response status code to decide whether an interface is reachable, which is often the basis for assertions in interface tests.
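A minimal sketch of such an assertion, using httpbin.org purely as an illustrative endpoint:

import requests

r = requests.get("http://httpbin.org/get", timeout=5)
# the interface is considered reachable only if it answers with HTTP 200
assert r.status_code == 200, "interface is not reachable: got %s" % r.status_code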
1: Add a wait time
requests.get(url, timeout=1)  # raises an error if the wait time is exceeded
2: Add request header information
requests.get(url, headers=headers)  # set the request headers
3: Add files
requests.post(url, files=files)  # attach files
(A combined timeout/headers sketch follows the file-transfer example below.)
File transfer:
url = 'http://httpbin.org/post'
files = {'file': open('report.xls', 'rb')}
r = requests.post(url, files=files)
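A minimal sketch combining the timeout and request-header options from the list above; the header value and URL are only illustrative:

import requests

headers = {"User-Agent": "my-test-client/1.0"}  # illustrative custom header
r = requests.get("http://httpbin.org/get",
                 headers=headers,  # set the request headers
                 timeout=1)        # raises requests.exceptions.Timeout if the server takes longer than 1s
print(r.status_code)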
The flow is as follows:
1. Read the data from the file
2. requests sends the data to the interface and gets back a status code
3. An assertion compares the returned status code with 200
4. An allure test report is generated
Flow for reading a csv file
1. Store the data (a sample file is sketched below)
2. Read the data
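The reading and request code below assumes a file ../data/test_csv.csv whose first row is a header and whose remaining rows each describe one request in the column order url, params, method. A minimal sketch that creates such a file (the rows themselves are only illustrative):

import csv

rows = [
    ["url", "params", "method"],                         # header row, skipped by the reader
    ["http://httpbin.org/get", "key1=value1", "get"],    # illustrative GET request
    ["http://httpbin.org/post", "key1=value1", "post"],  # illustrative POST request
]
with open("../data/test_csv.csv", "w", newline="") as f:
    csv.writer(f).writerows(rows)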
import csv

class ReadCsv():
    def readCsv(self):
        item = []
        rr = csv.reader(open("../data/test_csv.csv"))
        for csv_i in rr:
            item.append(csv_i)
        del item[0]  # drop the header row
        return item

r = ReadCsv()
print(r.readCsv())
3. requests calls the interface and returns the status code
import requests
from readdata.readcsv import ReadCsv

r = ReadCsv()
lists = r.readCsv()
items = []

class RequestsClass():
    def requestMethod(self):
        for i in lists:
            if i[2] == 'get':  # column order: url, params, method
                r = requests.get(i[0], params=i[1])
                items.append(r.status_code)
            else:
                r = requests.post(i[0], data=i[1])
                items.append(r.status_code)
        return items

req = RequestsClass()
print(req.requestMethod())
4.pytest断言设置并结合allure生成测试报告
import pytest,os
from requestsdemo.requestcsv import RequestsClass
r=RequestsClass()
rr=r.requestMethod()
class TestRequestsClass():
def testCsv(self):
for i in rr:
assert i==200
if __name__ == '__main__':
pytest.main(['--alluredir', 'report/result', 'test_csv.py'])
split = 'allure ' + 'generate ' + './report/result ' + '-o ' + './report/html ' + '--clean'
os.system(split)
5. Test report display
Flow for reading an excel file
1. Store the data (a sample workbook is sketched below)
2. Read the data
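The reading and request code below assumes a workbook ../data/test_exc.xlsx with a sheet named Sheet1 whose first row holds the headers url, params, method. A minimal sketch that creates such a workbook with openpyxl (the data rows are only illustrative):

from openpyxl import Workbook

wb = Workbook()
ws = wb.active
ws.title = "Sheet1"  # the reader looks the sheet up by this name
ws.append(["url", "params", "method"])                         # header row
ws.append(["http://httpbin.org/get", "key1=value1", "get"])    # illustrative GET request
ws.append(["http://httpbin.org/post", "key1=value1", "post"])  # illustrative POST request
wb.save("../data/test_exc.xlsx")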
from openpyxl import load_workbook

class UseExcel():
    def get_TestExcel(self):
        # open the workbook
        workbook = load_workbook('../data/test_exc.xlsx')
        # select the worksheet
        sheet = workbook['Sheet1']
        print(sheet.max_row)     # number of rows
        print(sheet.max_column)  # number of columns
        test_data = []  # collect every data row into a list
        for i in range(2, sheet.max_row + 1):
            sub_data = {}  # store each row as a dict keyed by the header row
            for j in range(1, sheet.max_column + 1):
                sub_data[sheet.cell(1, j).value] = sheet.cell(i, j).value
            test_data.append(sub_data)  # append this row's cell data
        return test_data

t = UseExcel()
f = t.get_TestExcel()
print(f)
3. requests calls the interface and returns the status code
import requests
from readdata.readexcel import UseExcel

u = UseExcel()
lists = u.get_TestExcel()
items = []

class RequestsExcClass():
    def requestExcMethod(self):
        for i in lists:
            if i.get('method') == 'get':
                r = requests.get(i['url'], params=i['params'])
                items.append(r.status_code)
            else:
                r = requests.post(i['url'], data=i['params'])
                items.append(r.status_code)
        return items

r = RequestsExcClass()
print(r.requestExcMethod())
4. Set up the pytest assertion and combine it with allure to generate a test report
import pytest, os
from requestsdemo.requestexc import RequestsExcClass

r = RequestsExcClass()
lists = r.requestExcMethod()

class TestRequestsClass():
    def testMethod(self):
        for i in lists:
            assert i == 200

if __name__ == '__main__':
    # generate the allure result files (json)
    pytest.main(['--alluredir', 'report/result', 'test_exc.py'])
    # convert the results into an html report (requires the allure command-line tool)
    split = 'allure generate ./report/result -o ./report/html --clean'
    os.system(split)
5. Test report display