import requests
from bs4 import BeautifulSoup
import pandas as pd
# Define the target URL
base_url = "https://w...content-available-to-author-only...v.cn/ygkj/wqkjgg/ssq/"
# Initialize the data store
data = []
# Loop over the result pages
for page in range(1, 5):  # Widen the range to cover more pages
    url = f"{base_url}?page={page}"
    response = requests.get(url, timeout=10)
    response.raise_for_status()  # Fail fast on HTTP errors
    soup = BeautifulSoup(response.content, "html.parser")

    # Locate the results table
    table = soup.find("table")  # Adjust the selector to the site's actual table structure
    if table is None:
        continue  # Skip pages that lack a results table
    rows = table.find_all("tr")[1:]  # Skip the header row

    for row in rows:
        cells = row.find_all("td")
        if len(cells) == 8:  # Make sure the column count is right
            period = cells[0].text.strip()  # Draw number
            red_balls = [cells[i].text.strip() for i in range(1, 7)]  # Red balls
            blue_ball = cells[7].text.strip()  # Blue ball
            data.append([period, *red_balls, blue_ball])
# Save to CSV
columns = ['period', 'red_1', 'red_2', 'red_3', 'red_4', 'red_5', 'red_6', 'blue']
df = pd.DataFrame(data, columns=columns)
df.to_csv('ssq_results.csv', index=False, encoding='utf-8')
print("数据抓取完成,保存为 ssq_results.csv")