From 6b0a9e1cc5cc98862d3ce147a2ecb3f24dd4c475 Mon Sep 17 00:00:00 2001
From: AiLe <1449854570@qq.com>
Date: Fri, 3 Jan 2025 09:00:49 +0800
Subject: [PATCH] sparksql: count valid records per order name and status
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 kafka_streaming.py |  3 --
 sparksql.py        | 87 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 87 insertions(+), 3 deletions(-)
 create mode 100644 sparksql.py

diff --git a/kafka_streaming.py b/kafka_streaming.py
index 539d06e..806deb5 100644
--- a/kafka_streaming.py
+++ b/kafka_streaming.py
@@ -2,9 +2,6 @@ import os
 from pyspark.sql import SparkSession
 import pyspark.sql.functions as F
 
-# # os.environ['JAVA_HOME'] = 'C:\\Program Files\\Java\\jdk1.8.0_351'
-# os.environ['HADOOP_HOME'] = 'D:\\CodeDevelopment\\DevelopmentEnvironment\\hadoop-2.8.1'
-
 if __name__ == '__main__':
     # 1- Create the SparkSession
     spark = SparkSession.builder \
diff --git a/sparksql.py b/sparksql.py
new file mode 100644
index 0000000..a737818
--- /dev/null
+++ b/sparksql.py
@@ -0,0 +1,87 @@
+import pymysql
+from pyspark.sql import SparkSession
+import pyspark.sql.functions as F
+
+
+def write_to_mysql(batch_df, batch_id):
+    # Convert the micro-batch DataFrame to a Pandas DataFrame
+    pandas_df = batch_df.toPandas()
+
+    # Connect to the MySQL database
+    connection = pymysql.connect(
+        host='43.140.205.103',
+        user='kaku',
+        password='p4J7fY8mc6hcZfjG',
+        database='kaku',
+        charset='utf8mb4'  # utf8mb4 so every Unicode character round-trips
+    )
+
+    cursor = connection.cursor()
+
+    # Upsert each aggregated row into MySQL. NOTE: in update output mode a
+    # window is re-emitted with its new total whenever late rows arrive for
+    # it, so the accumulation below can double-count; keying the table on
+    # the window start and writing `count` = VALUES(`count`) avoids that.
+    for _, row in pandas_df.iterrows():
+        order_name = row['order_name']
+        status = row['status']
+        count = row['count']
+
+        # Add the per-window count onto the running total for this key
+        sql = """
+        INSERT INTO order_name_YN (order_name, status, `count`)
+        VALUES (%s, %s, %s)
+        ON DUPLICATE KEY UPDATE `count` = `count` + VALUES(`count`)
+        """
+
+        cursor.execute(sql, (order_name, status, count))
+
+    # Commit the transaction and close the connection
+    connection.commit()
+    cursor.close()
+    connection.close()
+
+
+if __name__ == '__main__':
+    # 1- Create the SparkSession
+    spark = SparkSession.builder \
+        .config("spark.sql.shuffle.partitions", 1) \
+        .config("spark.jars.packages", "org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1") \
+        .appName('ss_kafka_push_to_mysql') \
+        .master('local[*]') \
+        .getOrCreate()
+
+    # 2- Read the Kafka data stream
+    kafka_stream = spark.readStream \
+        .format("kafka") \
+        .option("kafka.bootstrap.servers", "niit-node2:9092") \
+        .option("subscribe", "orders") \
+        .load()
+
+    # 3- Parse the tab-separated value into English column names
+    parsed_stream = kafka_stream.selectExpr("cast(value as string) as value", "timestamp") \
+        .withColumn("order_id", F.split(F.col("value"), "\t")[0]) \
+        .withColumn("order_type", F.split(F.col("value"), "\t")[1]) \
+        .withColumn("order_name", F.split(F.col("value"), "\t")[2]) \
+        .withColumn("order_quantity", F.split(F.col("value"), "\t")[3]) \
+        .withColumn("status", F.split(F.col("value"), "\t")[4]) \
+        .drop("value")
+
+    # 4- Aggregate over a tumbling time window
+    windowed_stream = parsed_stream \
+        .groupBy(
+            F.window(parsed_stream.timestamp, "2 seconds"),  # 2-second tumbling window
+            parsed_stream.order_name,
+            parsed_stream.status
+        ) \
+        .agg(F.count("*").alias("count"))
+
+    # 5- Push each micro-batch to MySQL with foreachBatch
+    windowed_stream.writeStream \
+        .foreachBatch(write_to_mysql) \
+        .outputMode("update") \
+        .trigger(processingTime="2 seconds") \
+        .start()
+
+    # Block until the streaming query terminates
+    spark.streams.awaitAnyTermination()
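
Note on the assumed MySQL schema: the upsert in write_to_mysql() only works
if order_name_YN carries a unique key over (order_name, status); otherwise
ON DUPLICATE KEY UPDATE never fires and every micro-batch inserts fresh rows.
A minimal sketch of a compatible table, created over the same pymysql
connection the patch uses; the column names come from the patch, but the
types, sizes, and key name are assumptions:

import pymysql

# Sketch of the table the patch's upsert assumes; only the unique key is
# strictly required by the ON DUPLICATE KEY UPDATE clause, the rest is guessed.
DDL = """
CREATE TABLE IF NOT EXISTS order_name_YN (
    order_name VARCHAR(255) NOT NULL,
    status     VARCHAR(32)  NOT NULL,
    `count`    BIGINT       NOT NULL DEFAULT 0,
    UNIQUE KEY uk_order_name_status (order_name, status)
) DEFAULT CHARSET = utf8mb4
"""

connection = pymysql.connect(host='43.140.205.103', user='kaku',
                             password='p4J7fY8mc6hcZfjG', database='kaku',
                             charset='utf8mb4')
try:
    with connection.cursor() as cursor:
        cursor.execute(DDL)  # idempotent thanks to IF NOT EXISTS
    connection.commit()
finally:
    connection.close()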
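
Note on the expected message layout: step 3 splits each Kafka record value on
"\t" and expects five fields in order (order_id, order_type, order_name,
order_quantity, status). A sketch of a test producer for the orders topic,
assuming the kafka-python package, which the patch itself does not depend on;
the sample rows and the Y/N status values are hypothetical:

from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers='niit-node2:9092')

# Hypothetical sample rows in the five-field tab-separated layout that
# sparksql.py parses: order_id, order_type, order_name, order_quantity, status.
rows = [
    ('1001', 'online', 'mouse', '2', 'Y'),
    ('1002', 'online', 'mouse', '1', 'N'),
    ('1003', 'store', 'keyboard', '3', 'Y'),
]
for row in rows:
    producer.send('orders', value='\t'.join(row).encode('utf-8'))

producer.flush()  # make sure everything reaches the broker before exiting
producer.close()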