
Batch-extracting group member QQ numbers with Python + Selenium

Date: 2021-10-08 22:02:46


Disclaimer: this method must not be used for any unlawful purpose; you alone bear the consequences if it is.

1. Environment setup

(1) Python 3.7

(2) Install selenium with pip: pip install selenium

(3) Install the Chrome browser and download the chromedriver build that matches its version (a quick smoke test is sketched below)
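
Before running the full script it is worth checking that selenium can actually drive Chrome. The snippet below is a minimal smoke test, assuming selenium 3.x and the same chromedriver path used in the script of section 2 (C:/attachment/chromedriver.exe); adjust the path and the test URL to your own setup.

from selenium import webdriver

# Launch Chrome through the downloaded chromedriver (selenium 3 accepts the driver
# path as the first argument), load a reachable page, print its title and quit.
browser = webdriver.Chrome('C:/attachment/chromedriver.exe')
browser.get('https://www.qq.com')
print(browser.title)
browser.quit()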

2. Code
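
The script below works in four stages: login_spider opens the target page and signs in through QQ quick login (click the login link, wait for the login iframe, then click the first avatar in the quick-login list); switch_spider clicks through to member management and switches to the newly opened window; start_spider walks through every group you have joined, scrolling each member list to the bottom and writing the QQ numbers to a per-group file; combine merges those files into one deduplicated list.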

import os
import re
import time
import datetime
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By  # this import was garbled in the source page


def scroll_foot(browser):
    # Scroll to the bottom of the page so that more members are loaded
    js = 'var q=document.documentElement.scrollTop=100000'
    return browser.execute_script(js)


def get_qq(browser, qq_file):
    # Every member row carries the class "mb"; the QQ number sits in one of its <td> cells
    trs = browser.find_elements_by_class_name('mb')
    if trs:
        for i, tr in enumerate(trs):
            tds = tr.find_elements_by_tag_name('td')[2:]
            qq = tds[2].text
            qq_file.writelines(qq + '\n')
        return i + 1  # number of member rows written


def extractor(browser, qq_list_path):
    # Keep scrolling until the page stops growing, i.e. all members have been loaded
    current_len = 0
    while current_len < len(browser.page_source):
        current_len = len(browser.page_source)
        scroll_foot(browser)
        time.sleep(1.0)
    qq_list_file = open(qq_list_path, 'w+')
    member_num = get_qq(browser, qq_list_file)
    qq_list_file.close()
    return member_num


def login_spider(exe_path, url):
    browser = webdriver.Chrome(exe_path)
    # Open the target page
    browser.get(url)
    # Simulate the login: find the login link and click it
    browser.find_element_by_css_selector('#headerInfo p a').click()
    WebDriverWait(browser, 10).until(
        EC.presence_of_all_elements_located((By.CSS_SELECTOR, '#loginWin iframe')))
    print('Login frame loaded')
    iframe_url = browser.find_element_by_css_selector('#loginWin iframe').get_attribute('src')
    # Visit the login iframe URL directly
    browser.get(iframe_url)
    # Find the quick-login avatar and click it,
    # waiting explicitly until the avatar list has finished loading
    WebDriverWait(browser, 10).until(
        EC.presence_of_all_elements_located((By.ID, 'qlogin_list')))
    browser.find_element_by_css_selector('#qlogin_list a').click()
    print('Logged in')
    return browser


def switch_spider(browser):
    # After logging in, wait for the group-management tab and click it
    WebDriverWait(browser, 10).until(
        EC.presence_of_all_elements_located((By.XPATH, './/ul[@id="headerNav"]/li[4]')))
    browser.find_element_by_xpath('.//ul[@id="headerNav"]/li[4]').click()
    # Then click the member-management tab
    WebDriverWait(browser, 10).until(
        EC.presence_of_all_elements_located((By.CLASS_NAME, 'color-tit')))
    browser.find_element_by_class_name('color-tit').click()
    # Member management opens in a new window, so switch to it
    browser.switch_to.window(browser.window_handles[1])
    return browser


def start_spider(browser, dir):
    WebDriverWait(browser, 10).until(
        EC.presence_of_all_elements_located((By.CLASS_NAME, 'my-all-group')))
    # Collect the list items for all groups I have joined
    lis = browser.find_elements_by_xpath('.//div[@class="my-all-group"]/ul[1]/li')
    group_num = len(lis)
    for idx in range(group_num):
        try:
            lis[idx].click()
            # The heading reads "group name(group id)"
            name_and_id = browser.find_element_by_id('groupTit').text
            name = name_and_id.split('(')[0]
            id = name_and_id.split('(')[1].split(')')[0]
            qq_list_path = dir + '/' + name + '_' + id
            print('Extracting [' + name_and_id + ']')
            member_num = extractor(browser, qq_list_path)
            print('Extracted [' + name_and_id + ']: ' + str(member_num) + ' members')
            # Open the group-switch dialog and re-read the group list
            browser.find_element_by_id('changeGroup').click()
            WebDriverWait(browser, 10).until(
                EC.presence_of_all_elements_located((By.CLASS_NAME, 'ui-dialog')))
            lis = browser.find_elements_by_xpath('.//div[@class="my-all-group"]/ul[1]/li')
        except Exception as e:
            continue


def combine(dir, file_name):
    # Merge the per-group files into one file, dropping duplicate QQ numbers
    qq_list = []
    dest_name = file_name
    dest_path = dir + '/' + file_name
    dest_file = open(dest_path, 'w+')
    list = os.listdir(dir)  # list every file in the dataset folder
    re_count = 0
    for file_name in list:
        if file_name == dest_name:  # skip the output file itself
            continue
        path = os.path.join(dir, file_name)
        for line in open(path, 'r'):
            line = line.strip().split()[0]
            if line not in qq_list:
                qq_list.append(line)
                dest_file.write(line + '\n')
            else:
                re_count += 1
    dest_file.close()
    print('Merge finished: ' + str(len(qq_list)) + ' members, ' + str(re_count) + ' duplicates removed')


if __name__ == '__main__':
    # NOTE: the target URL was lost when this article was extracted; it should
    # point to QQ's group-management page.
    url = '/'
    exe_path = 'C:/attachment/chromedriver.exe'
    # Build the Chrome driver and a dated output folder
    now = datetime.datetime.today().strftime("%Y%m%d")
    dir = 'dataset_' + now
    file_name = 'all_qq.txt'
    try:  # create the dataset folder, named dataset_ + yyyymmdd (today's date)
        os.mkdir(dir)
    except OSError:  # if the folder already exists, skip creation
        pass
    browser = login_spider(exe_path, url)
    switch_spider(browser)
    start_spider(browser, dir)
    browser.quit()
    combine(dir, file_name)
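
When run, the script creates a dated folder such as dataset_20211008, writes one file per group (named from the group name and id) with that group's member QQ numbers one per line, and finally merges everything into all_qq.txt with duplicates removed. Two caveats: the target url in the __main__ block was lost when this article was extracted and must be filled in with the address of QQ's group-management page before the script will work, and the find_element_by_* / find_elements_by_* helpers used throughout belong to selenium 3, which selenium 4 deprecates and later removes, so either pin an older release (pip install "selenium<4") or rewrite those calls with find_element(By..., ...).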

References:

[1] Using selenium in a Python crawler to scrape QQ group member information (fully automated login) [博客园 / cnblogs]

[2] Scraping the member lists of the QQ groups you have joined with Python [知乎 / Zhihu]
