#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from os.path import abspath
from pyspark.sql import SparkSession
import pandas as pd
if __name__ == "__main__":
    # Load the set of target IPs from the first column of ip.csv
    ip_set = pd.read_csv("./ip.csv", header=None)
    ip_set = tuple(ip_set[0].tolist())
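    # Caveat (not in the original script): str() renders a one-element tuple
    # with a trailing comma, e.g. "('10.0.0.1',)", which is invalid SQL.
    # If a single IP is possible, build the IN list explicitly instead:
    # in_clause = "({})".format(", ".join("'{}'".format(ip) for ip in ip_set))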
    # Initialize the Spark session; warehouse_location points to the default
    # location for managed databases and tables
    warehouse_location = abspath('spark-warehouse')
    spark = SparkSession \
        .builder \
        .appName("Python Spark SQL Hive") \
        .config("spark.sql.warehouse.dir", warehouse_location) \
        .enableHiveSupport() \
        .getOrCreate()
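    # begin_date and predicted_before_date_str are referenced below but were
    # never defined in the original script; a minimal sketch, assuming the
    # partitions are quoted 'YYYY-MM-DD' strings and a 7-day lookback window:
    end = datetime.date.today()
    begin_date = "'{}'".format((end - datetime.timedelta(days=7)).isoformat())
    predicted_before_date_str = "'{}'".format(end.isoformat())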
    # Interpolate parameters into the IN clause; ip_set is a tuple, so
    # str.format() renders it as a parenthesized, quoted list
    sql = r'''select time, value, ip from cpu_data_copy where (partition_date between {0} and {1}) and (ip in {2})'''.format(begin_date, predicted_before_date_str, ip_set)
    spark.sql('use your_database')  # replace with the target Hive database name
    sql_data = spark.sql(sql)  # returns a pyspark.sql.DataFrame
    # Preview the result
    sql_data.show()
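    # For large result sets, toPandas() below pulls every row into driver
    # memory; writing straight from Spark sidesteps that (the output directory
    # name here is illustrative, not from the original script):
    # sql_data.write.mode("overwrite").option("header", True).csv("./results_cpu_dir")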
    result = sql_data.toPandas()  # convert the Spark DataFrame to a pandas DataFrame
    result.to_csv("./results_cpu.csv")
    spark.stop()  # must come after all Spark statements have finished
    print("******************" + sql)
    print("finish!")