
Building a Proxy IP Pool with Python

2023-07-11 19:06

Build your own proxy pool: fetch proxy IPs from various proxy service sites, test their availability (use a stable URL for the test, ideally the very site you plan to crawl), save the usable ones to a database, and pull them out whenever they are needed.

Full code: https://pan.baidu.com/s/19qFHwYHYR6SLXCMAxry9pQ (extraction code: gxeb)
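The availability check itself is simple: route a request through the proxy to a stable URL and see whether it succeeds. A minimal synchronous sketch of the idea (the tester built in section 3 does the same thing concurrently with aiohttp; the proxy address below is a placeholder):

import requests


def is_usable(proxy, test_url='http://www.baidu.com'):
    """Return True if a request routed through the proxy succeeds."""
    proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy}
    try:
        return requests.get(test_url, proxies=proxies, timeout=10).status_code == 200
    except requests.RequestException:
        return False


print(is_usable('127.0.0.1:8888'))  # placeholder proxy address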

1. Fetching IPs

Libraries used: requests, pyquery

A few free proxy service sites (the ones scraped below): www.66ip.cn, www.xicidaili.com, www.ip3366.net and www.kuaidaili.com.

Create crawler.py:

import re

from pyquery import PyQuery as pq

from Proxy_pool.utils import get_page


class ProxyMetaclass(type):
    def __new__(cls, name, bases, attrs):
        count = 0
        attrs['__CrawlFunc__'] = []
        for k, v in attrs.items():
            if 'crawl_' in k:
                attrs['__CrawlFunc__'].append(k)
                count += 1
        attrs['__CrawlFuncCount__'] = count
        return type.__new__(cls, name, bases, attrs)


class Crawler(object, metaclass=ProxyMetaclass):
    def get_proxies(self, callback):
        proxies = []
        for proxy in eval("self.{}()".format(callback)):
            print('Got proxy', proxy)
            proxies.append(proxy)
        return proxies

    def crawl_daili66(self, page_count=4):
        """
        Fetch proxies from daili66.
        :param page_count: number of pages
        :return: proxies
        """
        start_url = 'http://www.66ip.cn/{}.html'
        urls = [start_url.format(page) for page in range(1, page_count + 1)]
        for url in urls:
            print('Crawling', url)
            html = get_page(url)
            if html:
                doc = pq(html)
                trs = doc('.containerbox table tr:gt(0)').items()
                for tr in trs:
                    ip = tr.find('td:nth-child(1)').text()
                    port = tr.find('td:nth-child(2)').text()
                    yield ':'.join([ip, port])

    def crawl_xicidaili(self):
        for i in range(1, 3):
            start_url = 'http://www.xicidaili.com/nn/{}'.format(i)
            headers = {
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                'Cookie': '_free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJWRjYzc5MmM1MTBiMDMzYTUzNTZjNzA4NjBhNWRjZjliBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMUp6S2tXT3g5a0FCT01ndzlmWWZqRVJNek1WanRuUDBCbTJUN21GMTBKd3M9BjsARg%3D%3D--2a69429cb2115c6a0cc9a86e0ebe2800c0d471b3',
                'Host': 'www.xicidaili.com',
                'Referer': 'http://www.xicidaili.com/nn/3',
                'Upgrade-Insecure-Requests': '1',
            }
            html = get_page(start_url, options=headers)
            if html:
                find_trs = re.compile('<tr class.*?>(.*?)</tr>', re.S)
                trs = find_trs.findall(html)
                for tr in trs:
                    find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
                    re_ip_address = find_ip.findall(tr)
                    find_port = re.compile('<td>(\d+)</td>')
                    re_port = find_port.findall(tr)
                    for address, port in zip(re_ip_address, re_port):
                        address_port = address + ':' + port
                        yield address_port.replace(' ', '')

    def crawl_ip3366(self):
        for i in range(1, 4):
            start_url = 'http://www.ip3366.net/?stype=1&page={}'.format(i)
            html = get_page(start_url)
            if html:
                find_tr = re.compile('<tr>(.*?)</tr>', re.S)
                trs = find_tr.findall(html)
                for s in range(1, len(trs)):
                    find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
                    re_ip_address = find_ip.findall(trs[s])
                    find_port = re.compile('<td>(\d+)</td>')
                    re_port = find_port.findall(trs[s])
                    for address, port in zip(re_ip_address, re_port):
                        address_port = address + ':' + port
                        yield address_port.replace(' ', '')

    def crawl_kuaidaili(self):
        for i in range(1, 4):
            start_url = 'http://www.kuaidaili.com/free/inha/{}/'.format(i)
            html = get_page(start_url)
            if html:
                ip_address = re.compile('<td data-title="IP">(.*?)</td>')
                re_ip_address = ip_address.findall(html)
                port = re.compile('<td data-title="PORT">(.*?)</td>')
                re_port = port.findall(html)
                for address, port in zip(re_ip_address, re_port):
                    address_port = address + ':' + port
                    yield address_port.replace(' ', '')
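ProxyMetaclass is the piece that wires everything together: when the Crawler class object is built, the metaclass walks its attributes and records the name of every crawl_-prefixed method in __CrawlFunc__, with the total in __CrawlFuncCount__, so the getter can iterate over every source without a hand-maintained list. A quick sketch to see it in action:

from Proxy_pool.crawler import Crawler

crawler = Crawler()
print(Crawler.__CrawlFuncCount__)  # 4
print(Crawler.__CrawlFunc__)       # ['crawl_daili66', 'crawl_xicidaili', 'crawl_ip3366', 'crawl_kuaidaili']
# Any recorded name can be handed to get_proxies:
# proxies = crawler.get_proxies('crawl_daili66')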

You can add proxy sources of your own: any crawl_-prefixed generator method that yields 'ip:port' strings is picked up automatically, as in the sketch below.
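A hedged sketch of such a method, to be added inside Crawler; the URL and selectors are placeholders for whatever site you pick, not a real working source:

    def crawl_mysource(self):
        """Hypothetical extra source; adapt the URL and selectors to the real site."""
        start_url = 'http://example.com/free-proxy-list'  # placeholder URL
        html = get_page(start_url)
        if html:
            doc = pq(html)
            # Skip the header row, then read IP and port from the first two cells
            for tr in doc('table tr:gt(0)').items():
                ip = tr.find('td:nth-child(1)').text()
                port = tr.find('td:nth-child(2)').text()
                if ip and port:
                    yield ':'.join([ip, port])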

Create utils.py:

import requests
from requests.exceptions import ConnectionError

base_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
}


def get_page(url, options=None):
    """
    Fetch a page.
    :param url: page URL
    :param options: extra request headers, merged over the defaults
    :return: page HTML on success, otherwise None
    """
    headers = dict(base_headers, **(options or {}))
    print('Fetching', url)
    try:
        response = requests.get(url, headers=headers)
        print('Fetched', url, response.status_code)
        if response.status_code == 200:
            return response.text
    except ConnectionError:
        print('Failed to fetch', url)
        return None

get_page fetches a web page and, on success, returns the full HTML so the content can be extracted afterwards. It is wrapped as a standalone helper so the crawler above can call it for each site.
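For example, fetching one listing page with an extra header merged over the defaults (the Referer value here is just illustrative):

from Proxy_pool.utils import get_page

html = get_page('http://www.ip3366.net/?stype=1&page=1',
                options={'Referer': 'http://www.ip3366.net/'})
if html:
    print(len(html), 'characters of HTML')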

Create getter.py:

import sys

from Proxy_pool.crawler import Crawler
from Proxy_pool.db import MySqlClient
from Proxy_pool.setting import *


class Getter():
    def __init__(self):
        self.mysql = MySqlClient()
        self.crawler = Crawler()

    def is_over_threshold(self):
        """
        Check whether the pool has reached its size limit.
        """
        if self.mysql.count() >= POOL_UPPER_THRESHOLD:
            return True
        else:
            return False

    def run(self):
        print('Getter is running')
        if not self.is_over_threshold():
            for callback_label in range(self.crawler.__CrawlFuncCount__):
                callback = self.crawler.__CrawlFunc__[callback_label]
                # Fetch proxies from this source
                all_ip = self.crawler.get_proxies(callback)
                sys.stdout.flush()
                for ip in all_ip:
                    self.mysql.add(ip)

The result:

Proxy IPs are being fetched successfully, but we don't yet know whether they are usable, and nothing is saved anywhere.

2. Saving the Fetched IPs

First deploy MySQL locally or on a server; look up the deployment steps for your platform if needed.

Create the database test, switch to it, and create the PROXY table (SCORE is numeric, since the pool compares and decrements it):

CREATE DATABASE test;
USE test;
CREATE TABLE PROXY (IP VARCHAR(255), SCORE INT);

Create setting.py:

Define the configuration in one file first: the database settings and constants such as the value of a full score.

# Database host
HOST = '192.168.98.128'  # IP of the machine running MySQL

# MySQL port
MYSQL_PORT = 3306

# MySQL username and password
MYSQL_USERNAME = 'root'
MYSQL_PASSWORD = 'RZXrzx1218'

# Database name
SQL_NAME = 'test'

# MAX_SCORE, MIN_SCORE and INITIAL_SCORE are the maximum, minimum and initial proxy scores
MAX_SCORE = 30
MIN_SCORE = 0
INITIAL_SCORE = 10

VALID_STATUS_CODES = [200, 302]

# Upper bound on pool size
POOL_UPPER_THRESHOLD = 1000

# Test cycle (seconds)
TESTER_CYCLE = 20

# Fetch cycle (seconds)
GETTER_CYCLE = 300

# Test URL; ideally the site you actually intend to crawl
TEST_URL = 'http://www.baidu.com'

# API settings
API_HOST = '0.0.0.0'
API_PORT = 5555

# Module switches
TESTER_ENABLED = True
GETTER_ENABLED = True
API_ENABLED = True

# Maximum batch size per test round
BATCH_TEST_SIZE = 30

Create db.py:

Define a class that manages the scored set of proxies in the database, with methods to set scores, fetch proxies, and so on.

import re
from random import choice

import pymysql

from Proxy_pool.error import PoolEmptyError
from Proxy_pool.setting import *


class MySqlClient(object):
    # Initialization
    def __init__(self, host=HOST, port=MYSQL_PORT, username=MYSQL_USERNAME,
                 password=MYSQL_PASSWORD, sqlname=SQL_NAME):
        self.db = pymysql.connect(host=host, user=username, password=password,
                                  port=port, db=sqlname)
        self.cursor = self.db.cursor()

    # Add a proxy IP
    def add(self, ip, score=INITIAL_SCORE):
        if not re.match('\d+\.\d+\.\d+\.\d+\:\d+', ip):
            print('Proxy', ip, 'is malformed, discarding')
            return
        if not self.exists(ip):
            sql_add = "INSERT INTO PROXY (IP,SCORE) VALUES ('%s', %s)" % (ip, score)
            self.cursor.execute(sql_add)
            self.db.commit()
            return True

    # Decrease a proxy's score
    def decrease(self, ip):
        sql_get = "SELECT * FROM PROXY WHERE IP='%s'" % (ip)
        self.cursor.execute(sql_get)
        score = self.cursor.fetchone()[1]
        if score and score > MIN_SCORE:
            print('Proxy', ip, 'current score', score, 'decreasing by 1')
            sql_change = "UPDATE PROXY SET SCORE = %s WHERE IP = '%s'" % (score - 1, ip)
        else:
            print('Proxy', ip, 'current score', score, 'removing')
            sql_change = "DELETE FROM PROXY WHERE IP = '%s'" % (ip)
        self.cursor.execute(sql_change)
        self.db.commit()

    # Set a proxy's score to the maximum
    def max(self, ip):
        print('Proxy', ip, 'is usable, setting score to', MAX_SCORE)
        sql_max = "UPDATE PROXY SET SCORE = %s WHERE IP = '%s'" % (MAX_SCORE, ip)
        self.cursor.execute(sql_max)
        self.db.commit()

    # Randomly pick a usable proxy
    def random(self):
        # Prefer a random proxy with a full score
        sql_max = "SELECT * FROM PROXY WHERE SCORE=%s" % (MAX_SCORE)
        if self.cursor.execute(sql_max):
            results = self.cursor.fetchall()
            return choice(results)[0]
        # Otherwise pick randomly from the whole pool
        else:
            sql_all = "SELECT * FROM PROXY WHERE SCORE BETWEEN %s AND %s" % (MIN_SCORE, MAX_SCORE)
            if self.cursor.execute(sql_all):
                results = self.cursor.fetchall()
                return choice(results)[0]
            else:
                raise PoolEmptyError

    # Check whether a proxy exists
    def exists(self, ip):
        sql_exists = "SELECT 1 FROM PROXY WHERE IP='%s' limit 1" % ip
        return self.cursor.execute(sql_exists)

    # Count the proxies
    def count(self):
        sql_count = "SELECT * FROM PROXY"
        return self.cursor.execute(sql_count)

    # Get all proxies
    def all(self):
        self.count()
        return self.cursor.fetchall()

    # Get a batch of proxies
    def batch(self, start, stop):
        sql_batch = "SELECT * FROM PROXY LIMIT %s, %s" % (start, stop - start)
        self.cursor.execute(sql_batch)
        return self.cursor.fetchall()
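One caveat: db.py imports PoolEmptyError from Proxy_pool/error.py, a file this post never shows. A minimal version (an assumption about its contents, but all the pool needs) would be:

# error.py
class PoolEmptyError(Exception):
    """Raised when no proxy is left in the pool."""
    def __str__(self):
        return repr('Proxy pool is empty')

With that in place, the client can be exercised on its own:

client = MySqlClient()
client.add('127.0.0.1:8888')  # placeholder proxy, stored at INITIAL_SCORE
print(client.count())         # e.g. 1
print(client.random())        # e.g. '127.0.0.1:8888'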

Result:

3. Testing IPs

Create tester.py:

import asyncio
import sys
import time

import aiohttp
from aiohttp import ClientError

from Proxy_pool.db import MySqlClient
from Proxy_pool.setting import *


class Tester(object):
    def __init__(self):
        self.mysql = MySqlClient()

    async def test_single_ip(self, ip):
        """
        Test a single proxy.
        :param ip: the proxy to test
        :return:
        """
        conn = aiohttp.TCPConnector(verify_ssl=False)
        async with aiohttp.ClientSession(connector=conn) as session:
            try:
                if isinstance(ip, bytes):
                    ip = ip.decode('utf-8')
                real_ip = 'http://' + ip
                print('Testing', ip)
                async with session.get(TEST_URL, proxy=real_ip, timeout=15,
                                       allow_redirects=False) as response:
                    if response.status in VALID_STATUS_CODES:
                        self.mysql.max(ip)
                        print('Proxy is usable', ip)
                    else:
                        self.mysql.decrease(ip)
                        print('Invalid response status', response.status, 'IP', ip)
            except (ClientError, aiohttp.client_exceptions.ClientConnectorError,
                    asyncio.TimeoutError, AttributeError):
                self.mysql.decrease(ip)
                print('Proxy request failed', ip)

    def run(self):
        """
        Main test loop.
        :return:
        """
        print('Tester is running')
        try:
            count = self.mysql.count()
            print(count, 'proxies remaining')
            for i in range(0, count, BATCH_TEST_SIZE):
                start = i
                stop = min(i + BATCH_TEST_SIZE, count)
                print('Testing proxies', start + 1, '-', stop)
                test_ip_group = self.mysql.batch(start, stop)
                loop = asyncio.get_event_loop()
                tasks = [self.test_single_ip(ip_tuple[0]) for ip_tuple in test_ip_group]
                loop.run_until_complete(asyncio.wait(tasks))
                sys.stdout.flush()
                time.sleep(5)
        except Exception as e:
            print('Tester error', e.args)

Result:

4. Defining the API

Create api.py:

from flask import Flask, g

from Proxy_pool.db import MySqlClient

__all__ = ['app']

app = Flask(__name__)


def get_conn():
    if not hasattr(g, 'mysql'):
        g.mysql = MySqlClient()
    return g.mysql


@app.route('/')
def index():
    return 'Welcome to Proxy Pool System'


@app.route('/random')
def get_proxy():
    """
    Get a proxy.
    :return: a random proxy
    """
    conn = get_conn()
    return conn.random()


@app.route('/count')
def get_counts():
    """
    Get the count of proxies.
    :return: pool size
    """
    conn = get_conn()
    return str(conn.count())
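Once the Flask app is running (the scheduler below starts it on API_HOST:API_PORT from setting.py), the two endpoints can be exercised from any HTTP client; a quick sketch assuming the default port 5555 on the local machine:

import requests

BASE = 'http://127.0.0.1:5555'
print(requests.get(BASE + '/count').text)   # pool size
print(requests.get(BASE + '/random').text)  # a random 'ip:port' string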

Result:

 

5. The Scheduler Module

Now tie the fetching, storage and testing modules together and run them as parallel processes.

Create scheduler.py:

import time
from multiprocessing import Process

from Proxy_pool.api import app
from Proxy_pool.getter import Getter
from Proxy_pool.tester import Tester
from Proxy_pool.setting import *


class Scheduler():
    def schedule_tester(self, cycle=TESTER_CYCLE):
        """
        Test proxies periodically.
        """
        tester = Tester()
        while True:
            print('Tester starting')
            tester.run()
            time.sleep(cycle)

    def schedule_getter(self, cycle=GETTER_CYCLE):
        """
        Fetch proxies periodically.
        """
        getter = Getter()
        while True:
            print('Fetching proxies')
            getter.run()
            time.sleep(cycle)

    def schedule_api(self):
        """
        Start the API.
        """
        app.run(API_HOST, API_PORT)

    def run(self):
        print('Proxy pool is running')
        if TESTER_ENABLED:
            tester_process = Process(target=self.schedule_tester)
            tester_process.start()
        if GETTER_ENABLED:
            getter_process = Process(target=self.schedule_getter)
            getter_process.start()
        if API_ENABLED:
            api_process = Process(target=self.schedule_api)
            api_process.start()

run() checks the switch for each of the three modules; for each one that is enabled it creates a new Process, sets the target, and calls start(). The three processes then run in parallel without interfering with one another.

6. Running It

Create run.py:

import io
import sys

from Proxy_pool.scheduler import Scheduler

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')


def main():
    try:
        s = Scheduler()
        s.run()
    except Exception:
        # Restart the scheduler if it crashes
        main()


if __name__ == '__main__':
    main()
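Finally, the whole point of the pool is to feed a crawler. A minimal consumer sketch, assuming the pool is running with the default API settings and with TARGET_URL standing in for whatever site you actually crawl:

import requests

PROXY_POOL_URL = 'http://127.0.0.1:5555/random'
TARGET_URL = 'http://www.baidu.com'  # placeholder target


def get_random_proxy():
    """Ask the pool's API for one random usable proxy."""
    return requests.get(PROXY_POOL_URL).text.strip()


proxy = get_random_proxy()
proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy}
response = requests.get(TARGET_URL, proxies=proxies, timeout=10)
print(response.status_code)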

 



      CopyRight 2018-2019 实验室设备网 版权所有