Introduction to PySpark
Benjamin Schmidt
Data Engineer
spark.sql()
# SQL aggregation query
spark.sql("""
SELECT Department, SUM(Salary) AS Total_Salary, AVG(Salary) AS Average_Salary
FROM employees
GROUP BY Department
""").show()
# Filter salaries above 3000
filtered_df = df.filter(df.Salary > 3000)
# Register the filtered DataFrame as a temporary view
filtered_df.createOrReplaceTempView("filtered_employees")
# Run a SQL aggregation on the filtered view
spark.sql("""
SELECT Department, COUNT(*) AS Employee_Count
FROM filtered_employees
GROUP BY Department
""").show()
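The same result can also be produced without SQL, directly in the DataFrame API. A minimal sketch, assuming the same df as above:

from pyspark.sql import functions as F

# Equivalent aggregation using the DataFrame API instead of SQL
(df.filter(df.Salary > 3000)
   .groupBy("Department")
   .agg(F.count("*").alias("Employee_Count"))
   .show())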
# Example of type casting
data = [("HR", "3000"), ("IT", "4000"), ("Finance", "3500")]
columns = ["Department", "Salary"]
df = spark.createDataFrame(data, schema=columns)
# Cast the Salary column from string to integer
df = df.withColumn("Salary", df["Salary"].cast("int"))
# Run the aggregation
df.groupBy("Department").sum("Salary").show()
# Example of aggregation with RDDs
# Map each row to a (Department, Salary) pair
rdd = df.rdd.map(lambda row: (row["Department"], row["Salary"]))
# Sum the salaries per department
rdd_aggregated = rdd.reduceByKey(lambda x, y: x + y)
print(rdd_aggregated.collect())
- Minimize groupBy() calls
- Check and optimize the execution plan with explain()
- SUM() and AVG() for summarizing data
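The explain() point refers to inspecting the plan Spark builds for a query before running it. A minimal sketch, assuming the df from the casting example:

# Print the execution plan of the aggregation instead of executing it
df.groupBy("Department").sum("Salary").explain()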