
Lab 2: Python Common Libraries & Database Access and Python Web Crawling for Big Data Collection (CQUPT)

I. Objectives

Lab 3 (Python common libraries and database access):

1. Become familiar with connecting to a MySQL database from the Jupyter Notebook platform.

2. Master the basic syntax for calling MySQL from Jupyter Notebook through hands-on programming examples.

3. Become familiar with libraries such as pandas and pymysql.

Lab 4 (Python web crawling for big data collection):

1. Recognize the important role web crawlers play in big data mining.

2. Master crawling with urllib, requests, BeautifulSoup, and related tools.

3. Understand how to crawl the web in Python with the Scrapy framework (a minimal sketch follows below).
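The examples in this lab never actually use Scrapy, so a minimal sketch of what objective 3 refers to is given here. Everything in it is illustrative: the spider name, the output fields, and the target site (quotes.toscrape.com, a public practice site) are assumptions, not part of the lab material.

import scrapy

# A hypothetical minimal spider; run it with:
#   scrapy runspider quotes_spider.py -o quotes.json
class QuotesSpider(scrapy.Spider):
    name = "quotes"
    start_urls = ["http://quotes.toscrape.com/"]  # public practice site

    def parse(self, response):
        # Scrapy's CSS selectors play the same role as BeautifulSoup's select()
        for quote in response.css("div.quote"):
            yield {
                "text": quote.css("span.text::text").get(),
                "author": quote.css("small.author::text").get(),
            }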

II. Principle

Example Python programs are written on the Spyder platform set up through Anaconda.

III. Software Platform

1. One PC running Windows 11.

2. Anaconda, Python, and the Spyder platform.

IV. Experiments

Example 1: Creating a roster database

https://i-blog.csdnimg.cn/direct/939db988dc4d4e2196bef21c925f6a27.png

Result: https://i-blog.csdnimg.cn/direct/fd281d1d6e694b578817b5fbd2ff3c67.png

https://i-blog.csdnimg.cn/direct/6b7aacedb4ed4d72bd7ba1ae5faad9a0.png

Code:

import pymysql
from openpyxl import load_workbook

# Load the Excel workbook
workbook = load_workbook(filename='student_420.xlsx')  # read the file
sheet = workbook.active

# Read the Excel rows into a dictionary
students_dict = {}
for row in sheet.iter_rows(min_row=2, values_only=True):  # skip the header row
    student_id, name = row
    students_dict[student_id] = name

# Open the database connection
db = pymysql.connect(
    host='localhost',
    user='root',
    password='123456',
    database='student_420'
)
cursor = db.cursor()

# Create the `mingdan` (roster) table
cursor.execute("""
    CREATE TABLE IF NOT EXISTS mingdan (
        id INT PRIMARY KEY,
        name VARCHAR(100)
    )
""")

# Empty the `mingdan` table so reruns start from a clean slate
cursor.execute("TRUNCATE TABLE mingdan")

# Insert the dictionary entries into the database
for student_id, name in students_dict.items():
    cursor.execute("INSERT INTO mingdan (id, name) VALUES (%s, %s)", (student_id, name))

# Commit the inserts
db.commit()

# Read the rows back into a dictionary (verifying the inserts)
cursor.execute("SELECT * FROM mingdan")
students_from_db = cursor.fetchall()
students_dict_from_db = {student[0]: student[1] for student in students_from_db}

# Print the dictionary read back from the database
print(students_dict_from_db)

# Close the connection
cursor.close()
db.close()
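A variation worth knowing: pymysql cursors also offer executemany(), which sends the whole batch in one call instead of looping row by row in Python. A minimal sketch under the same assumptions (local server, `student_420` database, `mingdan` table); the sample rows are hypothetical and assume those ids are not already present:

import pymysql

db = pymysql.connect(host='localhost', user='root',
                     password='123456', database='student_420')
with db.cursor() as cursor:
    rows = [(101, '张三'), (102, '李四')]  # hypothetical sample rows
    # One call for the whole batch instead of one execute() per row
    cursor.executemany("INSERT INTO mingdan (id, name) VALUES (%s, %s)", rows)
db.commit()
db.close()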

Example 2: Reading the roster database

Result:

https://i-blog.csdnimg.cn/direct/e809f64802f747a59fd1e8b5459df954.png

Code:

import pymysql

# Connect to the database
db = pymysql.connect(
    host='localhost',
    user='root',
    password='123456',
    database='student_420'  # replace with your actual database name
)
cursor = db.cursor()

# Query the rows in the `mingdan` table
cursor.execute("SELECT id, name FROM mingdan")

# Store the rows in a dictionary
student_dict = {}
for student_id, name in cursor.fetchall():
    student_dict[student_id] = name

# Print the dictionary
print(student_dict)

# Close the database connection
cursor.close()
db.close()
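Since the objectives also list pandas, the same query can be read straight into a DataFrame. A sketch assuming the credentials above and that SQLAlchemy is installed (thought question 1 below builds the same engine):

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("mysql+pymysql://root:123456@localhost/student_420")
df = pd.read_sql("SELECT id, name FROM mingdan", engine)

# Rebuild the same {id: name} dictionary from the DataFrame
student_dict = dict(zip(df["id"], df["name"]))
print(student_dict)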

Example 3: Entering and reading scores

Result: https://i-blog.csdnimg.cn/direct/53217d5f4ae74a41b0901f3611a8053c.png

Code:

import pymysql

# Open the database connection
db = pymysql.connect(
    host='localhost',
    user='root',
    password='123456',
    database='student_420'  # replace with your actual database name
)
cursor = db.cursor()

# Create the `chengji` (scores) table with `name`, `yuwen`, `shuxue`, `yingyu`, `jisuanji` columns
cursor.execute("""
    CREATE TABLE IF NOT EXISTS chengji (
        name VARCHAR(50),
        yuwen INT,
        shuxue INT,
        yingyu INT,
        jisuanji INT
    )
""")

# Insert the score records
scores = [
    ('张三', 88, 90, 92, 95),
    ('李四', 85, 89, 90, 94),
    ('王五', 89, 92, 88, 90),
    ('丁六', 82, 89, 90, 91)
]

for score in scores:
    cursor.execute("INSERT INTO chengji (name, yuwen, shuxue, yingyu, jisuanji) VALUES (%s, %s, %s, %s, %s)", score)

# Commit the inserts
db.commit()

# Query each student's name and Chinese score from `chengji`
cursor.execute("SELECT name, yuwen FROM chengji")
results = cursor.fetchall()

# Print the results
for name, yuwen in results:
    print(f"Name: {name}, Chinese score: {yuwen}")

# Close the database connection
cursor.close()
db.close()
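As an aside, the per-student average and maximum that thought question 1 later computes in pandas can also be pushed into SQL itself. A sketch against the same `chengji` table, assuming a connection and cursor opened as above:

# Let MySQL compute the derived values; GREATEST() is the row-wise maximum
cursor.execute("""
    SELECT name,
           (yuwen + shuxue + yingyu + jisuanji) / 4 AS avg_score,
           GREATEST(yuwen, shuxue, yingyu, jisuanji) AS max_score
    FROM chengji
""")
for name, avg_score, max_score in cursor.fetchall():
    print(name, avg_score, max_score)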

Example 4: Scraping book information from the Douban fiction page

Result:

https://i-blog.csdnimg.cn/direct/e7e0e0b3b7f74e25a2d02c49af882295.png

Code:

import requests
from bs4 import BeautifulSoup

# URL of the Douban fiction tag page
url = 'https://www.douban.com/tag/小说/?focus=book'

# Request headers that mimic a browser visit
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}

# Fetch the page
response = requests.get(url, headers=headers)

# Check whether the request succeeded
if response.status_code == 200:
    # Parse the page content
    soup = BeautifulSoup(response.text, 'html.parser')

    # List to collect the book records
    books = []

    # Find each book entry
    for book in soup.select("div.mod-list.book-list dl"):
        title = book.select_one("a.title").get_text(strip=True)
        desc = book.select_one("div.desc").get_text(strip=True)

        # Split the description into author, translator, publisher, date, and price
        desc_parts = desc.split(" / ")
        author = desc_parts[0]
        translator = desc_parts[1] if len(desc_parts) > 4 else "无"
        publisher = desc_parts[-3]
        date = desc_parts[-2]
        price = desc_parts[-1]

        # Append the record to the list
        books.append({
            "书名": title,
            "作者": author,
            "译者": translator,
            "出版社": publisher,
            "出版日期": date,
            "价格": price
        })

    # First print all the titles
    for book in books:
        print(book["书名"])
    print("\n" + "="*30 + "\n")

    # Then print each book's details, one per line
    for book in books:
        print(f"{book['作者']} / {book['译者']} / {book['出版社']} / {book['出版日期']} / {book['价格']}")
else:
    print("Failed to fetch the page, status code:", response.status_code)

Example 5: Saving the data scraped from the page above to a file

Result:

https://i-blog.csdnimg.cn/direct/09b25b7f1e654c6abb9e1508322a50a0.png

Code:

import requests
from bs4 import BeautifulSoup
import pandas as pd

# URL of the Douban fiction tag page
url = 'https://www.douban.com/tag/小说/?focus=book'

# Request headers that mimic a browser visit
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}

# Fetch the page
response = requests.get(url, headers=headers)

# Check whether the request succeeded
if response.status_code == 200:
    # Parse the page content
    soup = BeautifulSoup(response.text, 'html.parser')

    # List to collect the book records
    books = []

    # Find each book entry (only the first 9 books)
    for book in soup.select("div.mod-list.book-list dl")[:9]:
        title = book.select_one("a.title").get_text(strip=True)
        desc = book.select_one("div.desc").get_text(strip=True)

        # Split the description into author, translator, publisher, date, and price
        desc_parts = desc.split(" / ")
        author = desc_parts[0]
        translator = desc_parts[1] if len(desc_parts) > 4 else "无"
        publisher = desc_parts[-3]
        date = desc_parts[-2]
        price = desc_parts[-1]

        # Append the record to the list
        books.append({
            "书名": title,
            "作者": author,
            "译者": translator,
            "出版社": publisher,
            "出版日期": date,
            "价格": price
        })

    # Convert the records to a DataFrame
    df = pd.DataFrame(books)

    # Ask the user for a filename
    filename = input("Crawl finished. Enter a filename for the saved data: ") + ".csv"
    # df.to_csv(filename, index=False, encoding='utf-8-sig')
    df.to_csv(filename, index=False, encoding='gb18030')
    print(f"Data saved to file: {filename}")
else:
    print("Failed to fetch the page, status code:", response.status_code)

Thought question 1:

Result:

https://i-blog.csdnimg.cn/direct/4de7b0c1f02943cba8471f3dede30e73.png

Code:

import pymysql
import pandas as pd
from sqlalchemy import create_engine

# Open the database connection
db = pymysql.connect(
    host='localhost',
    user='root',
    password='123456',
    database='student_420'
)
cursor = db.cursor()

# Add a column to the table if it does not exist yet
def add_column_if_not_exists(cursor, table_name, column_name, column_definition):
    cursor.execute(f"""
        SELECT COUNT(*)
        FROM INFORMATION_SCHEMA.COLUMNS
        WHERE TABLE_NAME = '{table_name}' AND COLUMN_NAME = '{column_name}'
    """)
    if cursor.fetchone()[0] == 0:
        cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN {column_name} {column_definition}")

# Add `pingjunfen` (average) and `zuigaofen` (highest) columns to `chengji`
add_column_if_not_exists(cursor, 'chengji', 'pingjunfen', 'FLOAT')
add_column_if_not_exists(cursor, 'chengji', 'zuigaofen', 'INT')

# Read the score rows from the database
query = "SELECT name, yuwen, shuxue, yingyu, jisuanji FROM chengji"
cursor.execute(query)
data = cursor.fetchall()

# Convert the rows into a dictionary
students_dict = {}
for row in data:
    name, yuwen, shuxue, yingyu, jisuanji = row
    students_dict[name] = {
        'yuwen': yuwen,
        'shuxue': shuxue,
        'yingyu': yingyu,
        'jisuanji': jisuanji
    }

# Process the data with a pandas DataFrame
df = pd.DataFrame.from_dict(students_dict, orient='index')
df['平均分'] = df[['yuwen', 'shuxue', 'yingyu', 'jisuanji']].mean(axis=1)
df['最高分'] = df[['yuwen', 'shuxue', 'yingyu', 'jisuanji']].max(axis=1)

# Write the computed average and highest scores back to `chengji`
for name, row in df.iterrows():
    cursor.execute("""
        UPDATE chengji
        SET pingjunfen = %s, zuigaofen = %s
        WHERE name = %s
    """, (row['平均分'], row['最高分'], name))

# Commit the changes
db.commit()

# Read everything back from the database and print it
engine = create_engine("mysql+pymysql://root:123456@localhost/student_420")
df_all = pd.read_sql("SELECT * FROM chengji", engine)
print("Student scores in the database:")
print(df_all)

# Close the connection
cursor.close()
db.close()
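One caveat about add_column_if_not_exists(): it interpolates names into SQL with an f-string, which is acceptable for the fixed literals used here but unsafe for untrusted input. The existence check itself can use placeholders (the identifiers in ALTER TABLE cannot); a sketch that also scopes the check to the current database:

def column_exists(cursor, table_name, column_name):
    # Values travel as placeholders; DATABASE() restricts the lookup
    # to the schema this connection is using
    cursor.execute(
        """SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
           WHERE TABLE_SCHEMA = DATABASE()
             AND TABLE_NAME = %s AND COLUMN_NAME = %s""",
        (table_name, column_name))
    return cursor.fetchone()[0] > 0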

Thought question 2:

https://i-blog.csdnimg.cn/direct/51c899004b6a4c78b8a919a0e425dc4c.png

Result:

https://i-blog.csdnimg.cn/direct/9dfb2b437a3a449093f634382491db91.png

Code:

import pandas as pd
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup

# Read the HTML file using the gb2312 encoding
with open("unrate.htm", "r", encoding="gb2312") as file:
    html_content = file.read()

soup = BeautifulSoup(html_content, "html.parser")

# Assume the data sits in a table; walk its rows and pull out the cells
dates = []
values = []

for row in soup.find_all("tr")[1:]:  # skip the header row
    cols = row.find_all("td")
    date = cols[0].get_text(strip=True)
    value_str = cols[1].get_text(strip=True)

    # Skip empty values and convert the rest to float
    if value_str:
        try:
            value = float(value_str)
            dates.append(date)
            values.append(value)
        except ValueError:
            print(f"Could not convert to float: {value_str}, date: {date}")

# Build a DataFrame and save it as CSV
data = {"DATE": dates, "VALUE": values}
df = pd.DataFrame(data)

# Ask the user for a filename
filename = input("Enter a filename for the saved data (e.g. SYL_199): ") + ".csv"
df.to_csv(filename, index=False, encoding="utf-8-sig")
print(f"Data saved to file: {filename}")

# Filter to the 2014 rows
df["DATE"] = pd.to_datetime(df["DATE"])
df_2014 = df[df["DATE"].dt.year == 2014]

# Plot the 2014 unemployment-rate curve
plt.plot(df_2014["DATE"], df_2014["VALUE"], marker="o")
plt.xlabel("Date")
plt.ylabel("Unemployment Rate (%)")
plt.title("Unemployment Rate in 2014")
plt.xticks(rotation=45)
plt.grid()
plt.show()
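If the table in unrate.htm is well formed, pandas.read_html (which requires lxml or html5lib to be installed) can replace the manual BeautifulSoup loop. A sketch under that assumption; the resulting column names depend on the file's actual header row:

import pandas as pd

# read_html returns one DataFrame per <table> element in the file
tables = pd.read_html("unrate.htm", encoding="gb2312")
df = tables[0]
print(df.head())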