588a03bf04fe7a5d8e14a607fd15ac0c47338044..28e4ae897ac41cd7d6ef497590fd375ff1003c6a
2025-04-14 rp
处理后的特征
28e4ae 对比 | 目录
2025-04-14 rp
备份
9036cd 对比 | 目录
2025-04-14 rp
测试1
a440bb 对比 | 目录
已添加3个文件
2215 ■■■■■ 文件已修改
merged_data.csv 补丁 | 查看 | 原始文档 | blame | 历史
testonly.py 210 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
yd_test2.py 2005 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
merged_data.csv
Binary files differ
testonly.py
对比新文件
@@ -0,0 +1,210 @@
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error, mean_squared_error
# ---- Runtime dependency version checks ----
def _version_at_least(actual, minimum):
    """Return True when dotted version string *actual* >= *minimum*.

    Compares components numerically: plain string comparison is wrong
    (e.g. '1.9.0' > '1.23.5' lexicographically even though 1.9 < 1.23).
    Non-numeric suffixes in a component (e.g. '0rc1') are ignored.
    """
    def as_tuple(version):
        parts = []
        for piece in version.split('.'):
            digits = ''.join(ch for ch in piece if ch.isdigit())
            parts.append(int(digits) if digits else 0)
        return tuple(parts)
    return as_tuple(actual) >= as_tuple(minimum)

# Check numpy version (numeric comparison; '<' on strings is lexicographic)
if not _version_at_least(np.__version__, '1.23.5'):
    print(f"Warning: Current numpy version {np.__version__} may cause compatibility issues.")
    print("Please upgrade numpy to version 1.23.5 or higher.")
try:
    import tensorflow as tf
    print(f"TensorFlow version: {tf.__version__}")
except ImportError as e:
    print("Error importing TensorFlow:", e)
    sys.exit(1)
# Check TensorFlow version
if not _version_at_least(tf.__version__, '2.10.0'):
    print(f"Warning: Current TensorFlow version {tf.__version__} may cause compatibility issues.")
    print("Please upgrade TensorFlow to version 2.10.0 or higher.")
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv1D, LSTM, Dense, Layer
from tensorflow.keras.callbacks import EarlyStopping
# ================================
# 1. Data preprocessing module
# ================================
class DataPreprocessor:
    """Loads the salinity CSV and prepares model-ready, scaled sequences."""

    def __init__(self, file_path, target_col='Value'):
        # utf-8-sig strips a potential BOM; the series is indexed by DateTime.
        self.df = pd.read_csv(file_path, encoding='utf-8-sig', parse_dates=['DateTime'], index_col='DateTime')
        self.target_col = target_col
        self.scaler = MinMaxScaler(feature_range=(0, 1))

    def preprocess(self, resample_freq='h'):  # lowercase 'h': uppercase 'H' is deprecated in pandas
        """Resample the target series to a fixed frequency and min-max scale it.

        Returns:
            (scaled_data, dates): scaled values as an (n, 1) float array and
            the corresponding DatetimeIndex.
        """
        # Only the target column is used for modelling.
        value_series = self.df[self.target_col]
        # Regularize the irregularly-sampled series onto the target frequency.
        df_resampled = value_series.resample(resample_freq).mean()
        # ffill()/bfill(): fillna(method=...) is deprecated and removed in pandas 3.
        df_filled = df_resampled.ffill().bfill()
        # Scale to [0, 1] for training.
        self.scaled_data = self.scaler.fit_transform(df_filled.values.reshape(-1, 1))
        self.dates = df_filled.index
        return self.scaled_data, self.dates

    def create_sequences(self, data, look_back=72, pred_steps=120):
        """Build supervised (X, Y) windows: look_back inputs then pred_steps targets.

        NOTE(review): the range excludes the final possible window (the very
        last timestep is never used as a target) — confirm if intentional.
        """
        X, Y = [], []
        for i in range(len(data) - look_back - pred_steps):
            X.append(data[i:(i + look_back)])
            Y.append(data[(i + look_back):(i + look_back + pred_steps)])
        return np.array(X), np.array(Y)
# ================================
# 2. Model building module
# ================================
class TemporalAttention(Layer):
    """Bahdanau-style temporal attention over the encoder feature sequence."""

    def __init__(self, units):
        super().__init__()
        # Dense projections: encoder states (W1), query state (W2), score (V).
        self.W1 = Dense(units)
        self.W2 = Dense(units)
        self.V = Dense(1)

    def call(self, encoder_output, lstm_output):
        # Broadcast the (batch, units) query across the time axis.
        query = tf.expand_dims(lstm_output, 1)
        # Additive attention score for every encoder timestep.
        combined = tf.nn.tanh(self.W1(encoder_output) + self.W2(query))
        score = self.V(combined)
        # Normalize scores over time, then form the weighted context vector.
        attention_weights = tf.nn.softmax(score, axis=1)
        weighted = attention_weights * encoder_output
        context_vector = tf.reduce_sum(weighted, axis=1)
        return context_vector, attention_weights
class SalinityPredictor:
    """Wraps a CNN-LSTM-Attention network for multi-step salinity forecasting."""
    def __init__(self, look_back=72, pred_steps=120):
        self.look_back = look_back    # history window length in hours (3 days)
        self.pred_steps = pred_steps  # forecast horizon (5 days = 120 hours)
    def build_model(self):
        """Build and compile the CNN-LSTM-Attention hybrid model (Adam, MSE loss)."""
        inputs = Input(shape=(self.look_back, 1))
        # CNN feature extraction along the time axis
        cnn = Conv1D(64, 3, activation='relu', padding='same')(inputs)
        cnn = Conv1D(32, 3, activation='relu', padding='same')(cnn)
        # LSTM temporal modelling; second layer collapses to a single state vector
        lstm_out = LSTM(128, return_sequences=True)(cnn)
        lstm_out = LSTM(64, return_sequences=False)(lstm_out)
        # Attention over the CNN feature sequence, queried by the final LSTM state
        context_vector, _ = TemporalAttention(64)(cnn, lstm_out)
        # Dense head emits the entire forecast horizon in one shot
        outputs = Dense(self.pred_steps)(context_vector)
        self.model = Model(inputs=inputs, outputs=outputs)
        self.model.compile(optimizer='adam', loss='mse')
        return self.model
    def dynamic_split(self, data, dates, cutoff_date):
        """Return the training slice ending look_back steps before *cutoff_date*."""
        # index of the entry look_back positions before the last date <= cutoff
        cutoff_idx = np.where(dates <= cutoff_date)[0][-self.look_back]
        train_data = data[:cutoff_idx]
        return train_data
    def train(self, X_train, y_train, epochs=200, batch_size=32):
        """Fit the model with early stopping (patience 20) on a 20% validation split."""
        early_stop = EarlyStopping(monitor='val_loss', patience=20)
        history = self.model.fit(
            X_train, y_train,
            epochs=epochs,
            batch_size=batch_size,
            validation_split=0.2,
            callbacks=[early_stop],
            verbose=1
        )
        return history
    def predict(self, last_sequence):
        """Recursive multi-step prediction starting from the last observed window.

        NOTE(review): the network outputs pred_steps values per call, but only
        the first element is kept and fed back each iteration — confirm this
        recursive scheme is intended rather than using the one-shot output.
        """
        predictions = []
        current_seq = last_sequence.copy()
        for _ in range(self.pred_steps):
            pred = self.model.predict(current_seq[np.newaxis, :, :], verbose=0)
            predictions.append(pred[0][0])  # keep only the first predicted step
            current_seq = np.roll(current_seq, -1, axis=0)
            current_seq[-1] = pred[0][0]  # slide the window and append the prediction
        return np.array(predictions)
# ================================
# 3. Full pipeline execution
# ================================
if __name__ == "__main__":
    # ---- Configuration ----
    # Raw string: bare backslashes in a Windows path are invalid escape
    # sequences (SyntaxWarning today, a syntax error in future Python).
    DATA_PATH = r'D:\opencv\.venv\一取水.csv'
    CUTOFF_DATE = '2024-12-20 00:00'  # user-specified train/validation split point
    LOOK_BACK = 72    # 3 days of history
    PRED_STEPS = 120  # forecast 5 days
    # ---- Data preprocessing ----
    preprocessor = DataPreprocessor(DATA_PATH)
    scaled_data, dates = preprocessor.preprocess()
    X, Y = preprocessor.create_sequences(scaled_data, LOOK_BACK, PRED_STEPS)
    # ---- Model building ----
    predictor = SalinityPredictor(LOOK_BACK, PRED_STEPS)
    model = predictor.build_model()
    model.summary()
    # ---- Training on data before the cutoff ----
    train_data = predictor.dynamic_split(scaled_data, dates, pd.to_datetime(CUTOFF_DATE))
    X_train, y_train = preprocessor.create_sequences(train_data, LOOK_BACK, PRED_STEPS)
    history = predictor.train(X_train, y_train)
    # ---- Prediction / validation ----
    cutoff_idx = np.where(dates <= pd.to_datetime(CUTOFF_DATE))[0][-1]
    last_seq = scaled_data[cutoff_idx-LOOK_BACK:cutoff_idx]  # input window just before the cutoff
    scaled_pred = predictor.predict(last_seq)
    predictions = preprocessor.scaler.inverse_transform(scaled_pred.reshape(-1, 1))
    # ---- Visualization ----
    true_dates = pd.date_range(start=pd.to_datetime(CUTOFF_DATE), periods=PRED_STEPS, freq='h')
    plt.figure(figsize=(15, 6))
    # history before the cutoff
    plt.plot(dates[cutoff_idx-PRED_STEPS:cutoff_idx],
             preprocessor.scaler.inverse_transform(scaled_data[cutoff_idx-PRED_STEPS:cutoff_idx]),
             'b-', label='历史数据(分割点前)')
    # actual observations after the cutoff
    plt.plot(dates[cutoff_idx:cutoff_idx+PRED_STEPS],
             preprocessor.scaler.inverse_transform(scaled_data[cutoff_idx:cutoff_idx+PRED_STEPS]),
             'g-', label='实际数据(分割点后)')
    # forecast
    plt.plot(true_dates, predictions, 'r--', label='预测数据')
    # mark the train/validation split
    plt.axvline(x=pd.to_datetime(CUTOFF_DATE), color='k', linestyle='--', label='分割时间点')
    plt.title(f'盐度预测对比({CUTOFF_DATE}后5天)')
    plt.xlabel('时间')
    plt.ylabel('盐度值')
    plt.legend()
    plt.grid(True)
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.show()
    # ---- Metrics ----
    true_values = preprocessor.scaler.inverse_transform(scaled_data[cutoff_idx:cutoff_idx+PRED_STEPS])
    mae = mean_absolute_error(true_values, predictions)
    rmse = np.sqrt(mean_squared_error(true_values, predictions))
    print(f'验证指标 => MAE: {mae:.3f}, RMSE: {rmse:.3f}')
# ================================
# 4. Model saving and loading (optional)
# ================================
# model.save('salinity_predictor.h5')
# loaded_model = tf.keras.models.load_model('salinity_predictor.h5', custom_objects={'TemporalAttention': TemporalAttention})
yd_test2.py
对比新文件
@@ -0,0 +1,2005 @@
# xgboost修改版本
import os
import pickle
import pandas as pd
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view
import tkinter as tk
import tkinter.font as tkfont
from tkinter import ttk
from datetime import timedelta
from time import time
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from xgboost import XGBRegressor
from lunardate import LunarDate
from sklearn.model_selection import train_test_split, TimeSeriesSplit
from sklearn.metrics import mean_squared_error, mean_absolute_error
import matplotlib
# é…ç½® matplotlib ä¸­æ–‡æ˜¾ç¤º
matplotlib.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'SimSun', 'Arial Unicode MS']
matplotlib.rcParams['axes.unicode_minus'] = False
matplotlib.rcParams['font.family'] = 'sans-serif'
# å…¨å±€ç¼“存变量及特征名称
cached_model = None
last_training_time = None
feature_columns = None
current_view = {'xlim': None, 'ylim': None}  # ç”¨äºŽå­˜å‚¨å½“前图表视图
# æ•°æ®åŠ è½½ä¸Žé¢„å¤„ç†å‡½æ•°
# -------------------------------
def _clip_iqr(series, k=1.5, lower=True):
    """Clip outliers of *series* to Tukey fences [Q1 - k*IQR, Q3 + k*IQR].

    With lower=False only the upper fence is applied (used for rainfall,
    which is bounded below by zero).
    """
    q1 = series.quantile(0.25)
    q3 = series.quantile(0.75)
    iqr = q3 - q1
    upper_bound = q3 + k * iqr
    if lower:
        return series.clip(lower=q1 - k * iqr, upper=upper_bound)
    return series.clip(upper=upper_bound)


def _read_tagged_csv(path, default_tag):
    """Read a (DateTime, TagName, Value) CSV into a time-indexed frame.

    Two-column files get *default_tag* as their TagName; Value is coerced
    to numeric with invalid entries becoming NaN.
    """
    df = pd.read_csv(path)
    if len(df.columns) >= 3:
        df.columns = ['DateTime', 'TagName', 'Value']
    elif len(df.columns) == 2:
        df.columns = ['DateTime', 'Value']
        df['TagName'] = default_tag
    df['DateTime'] = pd.to_datetime(df['DateTime'])
    df.set_index('DateTime', inplace=True)
    df['Value'] = pd.to_numeric(df['Value'], errors='coerce')
    return df


def load_data(upstream_file, downstream_file, river_level_file=None, flow_file=None, rainfall_file=None):
    """Load salinity and optional water-level / flow / rainfall data, clean
    them (IQR outlier clipping, interpolation, smoothing), derive trend and
    statistic features, and return one merged DataFrame with a DateTime column.

    Returns None when the mandatory salinity files are missing.
    """
    try:
        upstream_df = _read_tagged_csv(upstream_file, 'upstream')
        downstream_df = _read_tagged_csv(downstream_file, 'downstream')
    except FileNotFoundError:
        print("文件未找到,请检查路径")
        return None
    # Outlier handling for both salinity series.
    for df in (upstream_df, downstream_df):
        df['Value'] = _clip_iqr(df['Value'])
    # Drop readings below the plausible salinity floor of 5.
    upstream_df = upstream_df[upstream_df['Value'] >= 5]
    downstream_df = downstream_df[downstream_df['Value'] >= 5]
    upstream_df = upstream_df.rename(columns={'Value': 'upstream'})[['upstream']]
    downstream_df = downstream_df.rename(columns={'Value': 'downstream'})[['downstream']]
    # Keep only timestamps present in both series.
    merged_df = pd.merge(upstream_df, downstream_df, left_index=True, right_index=True, how='inner')
    # ---- Optional Yangtze water level data ----
    if river_level_file:
        try:
            river_level_df = _read_tagged_csv(river_level_file, 'water_level')
            print(f"成功读取水位数据文件: {river_level_file}")
            river_level_df['Value'] = _clip_iqr(river_level_df['Value'])
            river_level_df = river_level_df.rename(columns={'Value': 'water_level'})[['water_level']]
            merged_df = pd.merge(merged_df, river_level_df, left_index=True, right_index=True, how='left')
            # Fill gaps: time interpolation up to 24h, then edge fills.
            merged_df['water_level'] = merged_df['water_level'].interpolate(method='time', limit=24)
            merged_df['water_level'] = merged_df['water_level'].ffill().bfill()
            merged_df['water_level_smooth'] = merged_df['water_level'].rolling(window=24, min_periods=1, center=True).mean()
            merged_df['water_level_trend_1h'] = merged_df['water_level_smooth'].diff(1)
            merged_df['water_level_trend_24h'] = merged_df['water_level_smooth'].diff(24)
            print(f"水位数据加载成功,范围: {merged_df['water_level'].min()} - {merged_df['water_level'].max()}")
        except Exception as e:
            print(f"水位数据加载失败: {str(e)}")
    # ---- Optional Datong flow data ----
    if flow_file:
        try:
            flow_df = _read_tagged_csv(flow_file, 'flow')
            print(f"成功读取流量数据文件: {flow_file}")
            flow_df['Value'] = _clip_iqr(flow_df['Value'])
            flow_df = flow_df.rename(columns={'Value': 'flow'})[['flow']]
            merged_df = pd.merge(merged_df, flow_df, left_index=True, right_index=True, how='left')
            merged_df['flow'] = merged_df['flow'].interpolate(method='time', limit=24)
            merged_df['flow'] = merged_df['flow'].ffill().bfill()
            merged_df['flow_smooth'] = merged_df['flow'].rolling(window=24, min_periods=1, center=True).mean()
            merged_df['flow_trend_1h'] = merged_df['flow_smooth'].diff(1)
            merged_df['flow_trend_24h'] = merged_df['flow_smooth'].diff(24)
            # Rolling flow statistics (1-day / 3-day).
            merged_df['mean_1d_flow'] = merged_df['flow_smooth'].rolling(window=24, min_periods=1).mean()
            merged_df['mean_3d_flow'] = merged_df['flow_smooth'].rolling(window=72, min_periods=1).mean()
            merged_df['std_1d_flow'] = merged_df['flow_smooth'].rolling(window=24, min_periods=1).std()
            merged_df['flow_change_1h'] = merged_df['flow_smooth'].diff(1)
            merged_df['flow_change_24h'] = merged_df['flow_smooth'].diff(24)
            # flow_sal_ratio is computed AFTER downstream_smooth exists (see below).
            print(f"流量数据加载成功,范围: {merged_df['flow'].min()} - {merged_df['flow'].max()} m³/s")
        except Exception as e:
            print(f"流量数据加载失败: {str(e)}")
    # ---- Optional rainfall data ----
    if rainfall_file:
        try:
            rainfall_df = _read_tagged_csv(rainfall_file, 'rainfall')
            print(f"成功读取降雨量数据文件: {rainfall_file}")
            # Rainfall is bounded below by 0: clip only unusually large values (looser fence).
            rainfall_df['Value'] = _clip_iqr(rainfall_df['Value'], k=3, lower=False)
            rainfall_df = rainfall_df.rename(columns={'Value': 'rainfall'})[['rainfall']]
            merged_df = pd.merge(merged_df, rainfall_df, left_index=True, right_index=True, how='left')
            # Missing rainfall means "no rain", not "unknown".
            merged_df['rainfall'] = merged_df['rainfall'].fillna(0)
            merged_df['rainfall_smooth'] = merged_df['rainfall'].rolling(window=6, min_periods=1, center=True).mean()
            # Cumulative rainfall features.
            merged_df['sum_1d_rainfall'] = merged_df['rainfall'].rolling(window=24, min_periods=1).sum()
            merged_df['sum_3d_rainfall'] = merged_df['rainfall'].rolling(window=72, min_periods=1).sum()
            # Rainfall intensity features.
            merged_df['rainfall_intensity_1h'] = merged_df['rainfall'].rolling(window=1, min_periods=1).mean()
            merged_df['rainfall_intensity_6h'] = merged_df['rainfall'].rolling(window=6, min_periods=1).mean()
            merged_df['rainfall_trend_1h'] = merged_df['rainfall_smooth'].diff(1)
            merged_df['rainfall_trend_24h'] = merged_df['rainfall_smooth'].diff(24)
            print(f"降雨量数据加载成功,范围: {merged_df['rainfall'].min()} - {merged_df['rainfall'].max()} mm")
        except Exception as e:
            print(f"降雨量数据加载失败: {str(e)}")
            import traceback
            traceback.print_exc()
    # ---- Salinity interpolation and smoothing ----
    merged_df['upstream'] = merged_df['upstream'].interpolate(method='time', limit=24)
    merged_df['downstream'] = merged_df['downstream'].interpolate(method='time', limit=24)
    merged_df['upstream'] = merged_df['upstream'].ffill().bfill()
    merged_df['downstream'] = merged_df['downstream'].ffill().bfill()
    merged_df['upstream_smooth'] = merged_df['upstream'].rolling(window=24, min_periods=1, center=True).mean()
    merged_df['downstream_smooth'] = merged_df['downstream'].rolling(window=24, min_periods=1, center=True).mean()
    # Salinity trend features.
    merged_df['upstream_trend_1h'] = merged_df['upstream_smooth'].diff(1)
    merged_df['upstream_trend_24h'] = merged_df['upstream_smooth'].diff(24)
    merged_df['downstream_trend_1h'] = merged_df['downstream_smooth'].diff(1)
    merged_df['downstream_trend_24h'] = merged_df['downstream_smooth'].diff(24)
    # BUG FIX: the original computed flow_sal_ratio during flow loading,
    # before downstream_smooth existed, so the column was never created.
    if 'flow_smooth' in merged_df.columns:
        merged_df['flow_sal_ratio'] = merged_df['flow_smooth'] / merged_df['downstream_smooth']
    # Low-salinity stretches get a wider smoothing window.
    low_sal_mask = merged_df['upstream'] < 50
    if low_sal_mask.any():
        merged_df.loc[low_sal_mask, 'upstream_smooth'] = merged_df.loc[low_sal_mask, 'upstream']\
            .rolling(window=48, min_periods=1, center=True).mean()
    # ---- Data-quality report ----
    print("\n数据质量统计:")
    print(f"总数据量: {len(merged_df)}")
    print(f"上游盐度范围: {merged_df['upstream'].min():.2f} - {merged_df['upstream'].max():.2f}")
    print(f"下游盐度范围: {merged_df['downstream'].min():.2f} - {merged_df['downstream'].max():.2f}")
    if 'water_level' in merged_df.columns:
        print(f"水位范围: {merged_df['water_level'].min():.2f} - {merged_df['water_level'].max():.2f}")
        print(f"水位缺失比例: {merged_df['water_level'].isna().mean()*100:.2f}%")
    if 'flow' in merged_df.columns:
        print(f"流量范围: {merged_df['flow'].min():.2f} - {merged_df['flow'].max():.2f} m³/s")
        print(f"流量缺失比例: {merged_df['flow'].isna().mean()*100:.2f}%")
    if 'rainfall' in merged_df.columns:
        print(f"降雨量范围: {merged_df['rainfall'].min():.2f} - {merged_df['rainfall'].max():.2f} mm")
        print(f"降雨量缺失比例: {merged_df['rainfall'].isna().mean()*100:.2f}%")
    # Move DateTime back into a regular column for downstream feature code.
    merged_df = merged_df.reset_index()
    return merged_df
# df = load_data('青龙港1.csv', '一取水.csv')
# æµ‹è¯•
# df = load_data('青龙港1.csv', '一取水.csv')
# df.to_csv('merged_data.csv', index=False)
# print(f"Merged data saved to 'merged_data.csv' successfully")
# # ç»˜åˆ¶ç›åº¦éšæ—¶é—´å˜åŒ–图
# plt.figure(figsize=(12, 6))
# plt.plot(df['DateTime'], df['upstream_smooth'], label='上游盐度', color='blue')
# plt.plot(df['DateTime'], df['downstream_smooth'], label='下游盐度', color='red')
# plt.xlabel('时间')
# plt.ylabel('盐度')
# plt.title('盐度随时间变化图')
# plt.legend()
# plt.grid(True)
# plt.tight_layout()
# plt.savefig('salinity_time_series.png', dpi=300)
# plt.show()
# Feature engineering section
# -------------------------------
# Add lunar-calendar (tidal) features
# -------------------------------
def add_lunar_features(df):
    """Append lunar-calendar (tidal proxy) features derived from each timestamp.

    Adds four columns: lunar_day, lunar_phase_sin / lunar_phase_cos (15-day
    half-cycle encoding), and is_high_tide (1 on lunar days 1-5 and 16-20,
    i.e. around spring tides).
    """
    # Hourly rows repeat the same calendar date ~24 times; cache the
    # solar->lunar conversion per unique date instead of per row.
    day_cache = {}

    def lunar_day_of(dt):
        key = (dt.year, dt.month, dt.day)
        if key not in day_cache:
            day_cache[key] = LunarDate.fromSolarDate(*key).day
        return day_cache[key]

    days = np.array([lunar_day_of(dt) for dt in df['DateTime']])
    df['lunar_day'] = days
    df['lunar_phase_sin'] = np.sin(2 * np.pi * days / 15)
    df['lunar_phase_cos'] = np.cos(2 * np.pi * days / 15)
    df['is_high_tide'] = np.where((days <= 5) | ((days >= 16) & (days <= 20)), 1, 0)
    return df
# -------------------------------
# Generate delay (lag) features — vectorized via shift
# -------------------------------
def batch_create_delay_features(df, delay_hours):
    """Add lagged (shifted) copies of the key smoothed series.

    For each base column and each delay ``d`` in *delay_hours*, a
    ``<prefix>_delay_<d>h`` column is added holding the series shifted
    by ``d`` rows. Water level joins the base set when available.
    """
    # Series that always get lagged when present.
    base_columns = ['upstream_smooth', 'downstream_smooth']

    # Prefer the smoothed water-level variant; build it on the fly if only
    # the raw column exists.
    if 'water_level_smooth' in df.columns:
        base_columns.append('water_level_smooth')
    elif 'water_level' in df.columns:
        print("注意: æ°´ä½å¹³æ»‘列不存在,使用原始水位列创建延迟特征")
        smoothed = df['water_level'].rolling(window=24, min_periods=1, center=True).mean()
        df['water_level_smooth'] = smoothed.fillna(df['water_level'])
        base_columns.append('water_level_smooth')

    # Create the delay features.
    for base in base_columns:
        if base not in df.columns:
            print(f"警告: åˆ— {base} ä¸å­˜åœ¨ï¼Œè·³è¿‡åˆ›å»ºå»¶è¿Ÿç‰¹å¾")
            continue
        prefix = base.split("_")[0]
        for delay in delay_hours:
            df[f'{prefix}_delay_{delay}h'] = df[base].shift(delay)
    return df
# -------------------------------
# Vectorized construction of training samples
# -------------------------------
def create_features_vectorized(df, look_back=96, forecast_horizon=1):
    """
    çŸ¢é‡åŒ–版本的特征创建函数 - ä½¿ç”¨æ»‘动窗口方法高效创建特征
    """
    print("开始创建矢量化特征...")
    # æ£€æŸ¥æ•°æ®é‡æ˜¯å¦è¶³å¤Ÿ
    if len(df) <= look_back + forecast_horizon:
        print(f"错误: æ•°æ®é‡({len(df)})不足,需要至少 {look_back + forecast_horizon + 1} ä¸ªæ ·æœ¬")
        return np.array([]), np.array([])
    # è®¡ç®—可以生成的样本总数
    total_samples = len(df) - look_back - forecast_horizon + 1
    print(f"原始可用样本数: {total_samples}")
    # ç¡®ä¿å¿…要的列存在
    required_features = ['upstream_smooth', 'downstream_smooth', 'DateTime',
                         'lunar_phase_sin', 'lunar_phase_cos', 'is_high_tide']
    # æ·»åŠ å¯é€‰ç‰¹å¾
    optional_features = {
        'water_level': ['water_level_smooth', 'mean_1d_water_level', 'mean_3d_water_level', 'std_1d_water_level',
                        'water_level_change_1h', 'water_level_change_24h', 'water_level_sal_ratio',
                        'water_level_trend_1h', 'water_level_trend_24h'],
        'flow': ['flow_smooth', 'mean_1d_flow', 'mean_3d_flow', 'std_1d_flow',
                 'flow_change_1h', 'flow_change_24h', 'flow_sal_ratio',
                 'flow_trend_1h', 'flow_trend_24h'],
        'rainfall': ['rainfall_smooth', 'sum_1d_rainfall', 'sum_3d_rainfall',
                    'rainfall_intensity_1h', 'rainfall_intensity_6h',
                    'rainfall_trend_1h', 'rainfall_trend_24h']
    }
    # æ£€æŸ¥å¹¶æ·»åŠ ç¼ºå¤±çš„ç‰¹å¾
    for feature in required_features:
        if feature not in df.columns:
            print(f"警告: ç¼ºå°‘必要特征 {feature},将使用默认值填充")
            df[feature] = 0
    # æ£€æŸ¥å¹¶æ·»åŠ å¯é€‰ç‰¹å¾
    for feature_group, features in optional_features.items():
        # æ£€æŸ¥åŸºç¡€ç‰¹å¾ï¼ˆä¾‹å¦‚水位、流量、降雨量)是否存在
        if any(col.startswith(feature_group) for col in df.columns):
            for feature in features:
                if feature not in df.columns:
                    print(f"警告: ç¼ºå°‘可选特征 {feature},将使用默认值填充")
                    df[feature] = 0
    # 1. å¢žå¼ºåŽ†å²çª—å£ç‰¹å¾
    # ä½¿ç”¨æ›´é•¿çš„历史窗口
    extended_look_back = max(look_back, 168)  # è‡³å°‘7天
    upstream_array = df['upstream_smooth'].values
    # è®¡ç®—可以生成的最大样本数量
    max_samples = len(upstream_array) - extended_look_back
    # è°ƒæ•´total_samples,确保不超过可用数据量
    total_samples = min(total_samples, max_samples)
    window_up = sliding_window_view(upstream_array, window_shape=extended_look_back)[:total_samples, :]
    # ä¸‹æ¸¸æœ€è¿‘ 24 å°æ—¶ï¼šåˆ©ç”¨æ»‘动窗口构造,窗口大小为 24
    downstream_array = df['downstream_smooth'].values
    window_down_full = sliding_window_view(downstream_array, window_shape=24)
    # ä¿®å¤ï¼šç¡®ä¿window_down的样本数量与window_up一致,使用相同的total_samples
    window_down = window_down_full[look_back-24 : look_back-24 + total_samples, :]
    # æ‰“印调试信息
    print(f"total_samples: {total_samples}")
    print(f"window_up shape: {window_up.shape}")
    print(f"window_down shape: {window_down.shape}")
    # 2. å¢žå¼ºæ—¶é—´ç‰¹å¾
    # ç¡®ä¿sample_df的大小与窗口数组一致
    sample_df = df.iloc[look_back: look_back + total_samples].copy()
    hour = sample_df['DateTime'].dt.hour.values.reshape(-1, 1)
    weekday = sample_df['DateTime'].dt.dayofweek.values.reshape(-1, 1)
    month = sample_df['DateTime'].dt.month.values.reshape(-1, 1)
    day_of_year = sample_df['DateTime'].dt.dayofyear.values.reshape(-1, 1)
    # æ—¶é—´ç‰¹å¾çš„高级表示
    hour_sin = np.sin(2 * np.pi * hour / 24)
    hour_cos = np.cos(2 * np.pi * hour / 24)
    weekday_sin = np.sin(2 * np.pi * weekday / 7)
    weekday_cos = np.cos(2 * np.pi * weekday / 7)
    month_sin = np.sin(2 * np.pi * month / 12)
    month_cos = np.cos(2 * np.pi * month / 12)
    day_sin = np.sin(2 * np.pi * day_of_year / 365)
    day_cos = np.cos(2 * np.pi * day_of_year / 365)
    # ç»„合时间特征
    basic_time_feats = np.hstack([hour_sin, hour_cos, weekday_sin, weekday_cos,
                                month_sin, month_cos, day_sin, day_cos])
    # 3. å¢žå¼ºå†œåŽ†ç‰¹å¾
    lunar_feats = sample_df[['lunar_phase_sin','lunar_phase_cos','is_high_tide']].values
    # 4. å¢žå¼ºç»Ÿè®¡ç‰¹å¾
    # ä¸Šæ¸¸ç»Ÿè®¡ç‰¹å¾ - æ·»åŠ æ›´å¤šæ—¶é—´çª—å£
    stats_windows = [1, 3, 7, 14, 30]  # å¤©
    for window in stats_windows:
        hours = window * 24
        df[f'mean_{window}d_up'] = df['upstream_smooth'].rolling(window=hours, min_periods=1).mean()
        df[f'std_{window}d_up'] = df['upstream_smooth'].rolling(window=hours, min_periods=1).std()
        df[f'max_{window}d_up'] = df['upstream_smooth'].rolling(window=hours, min_periods=1).max()
        df[f'min_{window}d_up'] = df['upstream_smooth'].rolling(window=hours, min_periods=1).min()
        df[f'mean_{window}d_down'] = df['downstream_smooth'].rolling(window=hours, min_periods=1).mean()
        df[f'std_{window}d_down'] = df['downstream_smooth'].rolling(window=hours, min_periods=1).std()
        df[f'max_{window}d_down'] = df['downstream_smooth'].rolling(window=hours, min_periods=1).max()
        df[f'min_{window}d_down'] = df['downstream_smooth'].rolling(window=hours, min_periods=1).min()
    # 5. å¢žå¼ºè¶‹åŠ¿ç‰¹å¾
    # è®¡ç®—更细粒度的趋势
    trend_periods = [1, 3, 6, 12, 24, 48, 72, 168]  # å°æ—¶
    for period in trend_periods:
        # ä¸Šæ¸¸è¶‹åŠ¿
        df[f'upstream_trend_{period}h'] = df['upstream_smooth'].diff(period)
        # ä¸‹æ¸¸è¶‹åŠ¿
        df[f'downstream_trend_{period}h'] = df['downstream_smooth'].diff(period)
    # 6. å¢žå¼ºå˜åŒ–率特征
    # è®¡ç®—更细粒度的变化率
    for period in trend_periods:
        # ä¸Šæ¸¸å˜åŒ–率
        df[f'upstream_change_rate_{period}h'] = df['upstream_smooth'].pct_change(period)
        # ä¸‹æ¸¸å˜åŒ–率
        df[f'downstream_change_rate_{period}h'] = df['downstream_smooth'].pct_change(period)
    # 7. å¢žå¼ºç›åº¦å·®å¼‚特征
    df['salinity_diff'] = df['upstream_smooth'] - df['downstream_smooth']
    for period in trend_periods:
        df[f'salinity_diff_{period}h'] = df['salinity_diff'].diff(period)
    # 8. å¢žå¼ºç›åº¦æ¯”率特征
    df['salinity_ratio'] = df['upstream_smooth'] / df['downstream_smooth']
    for period in trend_periods:
        df[f'salinity_ratio_{period}h'] = df['salinity_ratio'].diff(period)
    # 9. å¢žå¼ºäº¤äº’特征
    # è®¡ç®—上游和下游的交互特征
    df['up_down_interaction'] = df['upstream_smooth'] * df['downstream_smooth']
    df['up_down_ratio'] = df['upstream_smooth'] / df['downstream_smooth']
    df['up_down_diff'] = df['upstream_smooth'] - df['downstream_smooth']
    # 10. å¢žå¼ºå‘¨æœŸæ€§ç‰¹å¾
    # è®¡ç®—多个时间尺度的周期性特征
    cycle_periods = [12, 24, 48, 72, 168]  # å°æ—¶
    for period in cycle_periods:
        df[f'upstream_{period}h_cycle'] = df['upstream_smooth'].rolling(window=period, min_periods=1).mean()
        df[f'downstream_{period}h_cycle'] = df['downstream_smooth'].rolling(window=period, min_periods=1).mean()
    # 11. å¢žå¼ºè‡ªç›¸å…³ç‰¹å¾
    # è®¡ç®—不同时间窗口的自相关系数
    autocorr_windows = [24, 48, 72, 168]  # å°æ—¶
    for window in autocorr_windows:
        # ä¸Šæ¸¸è‡ªç›¸å…³
        df[f'upstream_autocorr_{window}h'] = df['upstream_smooth'].rolling(window=window).apply(
            lambda x: x.autocorr() if len(x) > 1 else 0
        )
        # ä¸‹æ¸¸è‡ªç›¸å…³
        df[f'downstream_autocorr_{window}h'] = df['downstream_smooth'].rolling(window=window).apply(
            lambda x: x.autocorr() if len(x) > 1 else 0
        )
    # 12. å¢žå¼ºäº’相关特征
    # è®¡ç®—上下游之间的互相关系数
    for window in autocorr_windows:
        df[f'cross_corr_{window}h'] = df['upstream_smooth'].rolling(window=window).apply(
            lambda x: x.corr(df['downstream_smooth'].iloc[x.index]) if len(x) > 1 else 0
        )
    # æ›´æ–°æ ·æœ¬æ•°æ®æ¡†ï¼ŒåŒ…含所有创建的特征
    sample_df = df.iloc[look_back: look_back + total_samples].copy()
    # æ”¶é›†æ‰€æœ‰ç‰¹å¾åˆ—名
    # ç»Ÿè®¡ç‰¹å¾
    stats_cols = []
    for window in stats_windows:
        stats_cols.extend([
            f'mean_{window}d_up', f'std_{window}d_up', f'max_{window}d_up', f'min_{window}d_up',
            f'mean_{window}d_down', f'std_{window}d_down', f'max_{window}d_down', f'min_{window}d_down'
        ])
    # è¶‹åŠ¿ç‰¹å¾
    trend_cols = []
    for period in trend_periods:
        trend_cols.extend([f'upstream_trend_{period}h', f'downstream_trend_{period}h'])
    # å˜åŒ–率特征
    change_rate_cols = []
    for period in trend_periods:
        change_rate_cols.extend([f'upstream_change_rate_{period}h', f'downstream_change_rate_{period}h'])
    # ç›åº¦å·®å¼‚特征
    salinity_diff_cols = ['salinity_diff'] + [f'salinity_diff_{period}h' for period in trend_periods]
    # ç›åº¦æ¯”率特征
    salinity_ratio_cols = ['salinity_ratio'] + [f'salinity_ratio_{period}h' for period in trend_periods]
    # äº¤äº’特征
    interaction_cols = ['up_down_interaction', 'up_down_ratio', 'up_down_diff']
    # å‘¨æœŸæ€§ç‰¹å¾
    cycle_cols = []
    for period in cycle_periods:
        cycle_cols.extend([f'upstream_{period}h_cycle', f'downstream_{period}h_cycle'])
    # è‡ªç›¸å…³ç‰¹å¾
    autocorr_cols = []
    for window in autocorr_windows:
        autocorr_cols.extend([f'upstream_autocorr_{window}h', f'downstream_autocorr_{window}h'])
    # äº’相关特征
    cross_corr_cols = [f'cross_corr_{window}h' for window in autocorr_windows]
    # æ£€æŸ¥æ‰€æœ‰ç‰¹å¾æ˜¯å¦å­˜åœ¨
    all_feature_cols = stats_cols + trend_cols + change_rate_cols + salinity_diff_cols + \
                      salinity_ratio_cols + interaction_cols + cycle_cols + autocorr_cols + cross_corr_cols
    for col in all_feature_cols:
        if col not in sample_df.columns:
            print(f"警告: ç¼ºå°‘特征 {col},将使用默认值填充")
            sample_df[col] = 0
    # æå–特征数组
    stats_feats = sample_df[stats_cols].values
    trend_feats = sample_df[trend_cols].values
    change_rate_feats = sample_df[change_rate_cols].values
    salinity_diff_feats = sample_df[salinity_diff_cols].values
    salinity_ratio_feats = sample_df[salinity_ratio_cols].values
    interaction_feats = sample_df[interaction_cols].values
    cycle_feats = sample_df[cycle_cols].values
    autocorr_feats = sample_df[autocorr_cols].values
    cross_corr_feats = sample_df[cross_corr_cols].values
    # 13. å¢žå¼ºå¤–部特征
    external_feats = []
    # æ·»åŠ æ°´ä½ç‰¹å¾
    if 'water_level' in sample_df.columns:
        try:
            # æ£€æŸ¥æ°´ä½æ•°æ®æ˜¯å¦è¶³å¤Ÿå¯ç”¨
            valid_water_level_pct = (~sample_df['water_level'].isna()).mean() * 100
            if valid_water_level_pct < 60:
                print(f"水位数据可用比例({valid_water_level_pct:.1f}%)过低,跳过水位特征")
            else:
                print(f"添加水位特征,数据可用率: {valid_water_level_pct:.1f}%")
                # ä½¿ç”¨æ°´ä½å¹³æ»‘数据作为特征
                if 'water_level_smooth' in sample_df.columns:
                    water_level_smooth = sample_df['water_level_smooth'].values.reshape(-1, 1)
                    water_level_smooth = np.nan_to_num(water_level_smooth, nan=sample_df['water_level_smooth'].mean())
                    external_feats.append(water_level_smooth)
                # æ·»åŠ æ°´ä½çª—å£æ•°æ®
                if 'water_level_smooth' in df.columns and len(df) >= look_back:
                    water_level_array = df['water_level_smooth'].values
                    water_level_array = np.nan_to_num(water_level_array, nan=np.nanmean(water_level_array))
                    window_water_level = sliding_window_view(water_level_array, window_shape=48)[:total_samples, :]
                    window_water_level = window_water_level[:, ::4]  # æ¯4小时取一个点,共12个点
                    external_feats.append(window_water_level)
                # æ·»åŠ æ°´ä½ç»Ÿè®¡ç‰¹å¾
                if all(col in sample_df.columns for col in ['mean_1d_water_level', 'mean_3d_water_level', 'std_1d_water_level']):
                    water_level_stats = sample_df[['mean_1d_water_level', 'mean_3d_water_level', 'std_1d_water_level']].values
                    water_level_stats = np.nan_to_num(water_level_stats, nan=0)
                    external_feats.append(water_level_stats)
                # æ·»åŠ æ°´ä½å˜åŒ–çŽ‡ç‰¹å¾
                if all(col in sample_df.columns for col in ['water_level_change_1h', 'water_level_change_24h']):
                    water_level_changes = sample_df[['water_level_change_1h', 'water_level_change_24h']].values
                    water_level_changes = np.nan_to_num(water_level_changes, nan=0)
                    external_feats.append(water_level_changes)
                # æ·»åŠ æ°´ä½ä¸Žç›åº¦æ¯”çŽ‡
                if 'water_level_sal_ratio' in sample_df.columns:
                    water_level_ratio = sample_df['water_level_sal_ratio'].values.reshape(-1, 1)
                    water_level_ratio = np.nan_to_num(water_level_ratio, nan=1)
                    external_feats.append(water_level_ratio)
                # æ·»åŠ æ°´ä½è¶‹åŠ¿ç‰¹å¾
                if all(col in sample_df.columns for col in ['water_level_trend_1h', 'water_level_trend_24h']):
                    water_level_trends = sample_df[['water_level_trend_1h', 'water_level_trend_24h']].values
                    water_level_trends = np.nan_to_num(water_level_trends, nan=0)
                    external_feats.append(water_level_trends)
                print(f"已添加水位相关特征: {len(external_feats)}组")
        except Exception as e:
            print(f"添加水位特征时出错: {e}")
    # æ·»åŠ æµé‡ç‰¹å¾
    if 'flow' in sample_df.columns:
        try:
            valid_flow_pct = (~sample_df['flow'].isna()).mean() * 100
            if valid_flow_pct < 60:
                print(f"流量数据可用比例({valid_flow_pct:.1f}%)过低,跳过流量特征")
            else:
                print(f"添加流量特征,数据可用率: {valid_flow_pct:.1f}%")
                # ä½¿ç”¨æµé‡å¹³æ»‘数据作为特征
                if 'flow_smooth' in sample_df.columns:
                    flow_smooth = sample_df['flow_smooth'].values.reshape(-1, 1)
                    flow_smooth = np.nan_to_num(flow_smooth, nan=sample_df['flow_smooth'].mean())
                    external_feats.append(flow_smooth)
                # æ·»åŠ æµé‡çª—å£æ•°æ®
                if 'flow_smooth' in df.columns and len(df) >= look_back:
                    flow_array = df['flow_smooth'].values
                    flow_array = np.nan_to_num(flow_array, nan=np.nanmean(flow_array))
                    window_flow = sliding_window_view(flow_array, window_shape=48)[:total_samples, :]
                    window_flow = window_flow[:, ::4]  # æ¯4小时取一个点,共12个点
                    external_feats.append(window_flow)
                # æ·»åŠ æµé‡ç»Ÿè®¡ç‰¹å¾
                if all(col in sample_df.columns for col in ['mean_1d_flow', 'mean_3d_flow', 'std_1d_flow']):
                    flow_stats = sample_df[['mean_1d_flow', 'mean_3d_flow', 'std_1d_flow']].values
                    flow_stats = np.nan_to_num(flow_stats, nan=0)
                    external_feats.append(flow_stats)
                # æ·»åŠ æµé‡å˜åŒ–çŽ‡ç‰¹å¾
                if all(col in sample_df.columns for col in ['flow_change_1h', 'flow_change_24h']):
                    flow_changes = sample_df[['flow_change_1h', 'flow_change_24h']].values
                    flow_changes = np.nan_to_num(flow_changes, nan=0)
                    external_feats.append(flow_changes)
                # æ·»åŠ æµé‡ä¸Žç›åº¦æ¯”çŽ‡
                if 'flow_sal_ratio' in sample_df.columns:
                    flow_ratio = sample_df['flow_sal_ratio'].values.reshape(-1, 1)
                    flow_ratio = np.nan_to_num(flow_ratio, nan=1)
                    external_feats.append(flow_ratio)
                # æ·»åŠ æµé‡è¶‹åŠ¿ç‰¹å¾
                if all(col in sample_df.columns for col in ['flow_trend_1h', 'flow_trend_24h']):
                    flow_trends = sample_df[['flow_trend_1h', 'flow_trend_24h']].values
                    flow_trends = np.nan_to_num(flow_trends, nan=0)
                    external_feats.append(flow_trends)
                print(f"已添加流量相关特征: {len(external_feats)}组")
        except Exception as e:
            print(f"添加流量特征时出错: {e}")
    # æ·»åŠ é™é›¨é‡ç‰¹å¾
    if 'rainfall' in sample_df.columns:
        try:
            valid_rainfall_pct = (~sample_df['rainfall'].isna()).mean() * 100
            if valid_rainfall_pct < 60:
                print(f"降雨量数据可用比例({valid_rainfall_pct:.1f}%)过低,跳过降雨量特征")
            else:
                print(f"添加降雨量特征,数据可用率: {valid_rainfall_pct:.1f}%")
                # ä½¿ç”¨å¹³æ»‘后的降雨量数据
                if 'rainfall_smooth' in sample_df.columns:
                    rainfall_smooth = sample_df['rainfall_smooth'].values.reshape(-1, 1)
                    rainfall_smooth = np.nan_to_num(rainfall_smooth, nan=0)
                    external_feats.append(rainfall_smooth)
                # æ·»åŠ ç´¯è®¡é™é›¨é‡ç‰¹å¾
                if all(col in sample_df.columns for col in ['sum_1d_rainfall', 'sum_3d_rainfall']):
                    rainfall_sums = sample_df[['sum_1d_rainfall', 'sum_3d_rainfall']].values
                    rainfall_sums = np.nan_to_num(rainfall_sums, nan=0)
                    external_feats.append(rainfall_sums)
                # æ·»åŠ é™é›¨å¼ºåº¦ç‰¹å¾
                if all(col in sample_df.columns for col in ['rainfall_intensity_1h', 'rainfall_intensity_6h']):
                    rainfall_intensity = sample_df[['rainfall_intensity_1h', 'rainfall_intensity_6h']].values
                    rainfall_intensity = np.nan_to_num(rainfall_intensity, nan=0)
                    external_feats.append(rainfall_intensity)
                # æ·»åŠ é™é›¨é‡çª—å£æ•°æ®ï¼ˆå¦‚æžœå­˜åœ¨ï¼‰
                if 'rainfall_smooth' in df.columns and len(df) >= look_back:
                    rainfall_array = df['rainfall_smooth'].values
                    try:
                        # å¤„理可能的NaN值
                        rainfall_array = np.nan_to_num(rainfall_array, nan=0)
                        # æž„建降雨量的历史窗口数据
                        window_rainfall = sliding_window_view(rainfall_array, window_shape=24)[:total_samples, :]
                        # åªå–24小时中的关键点以减少维度
                        window_rainfall = window_rainfall[:, ::2]  # æ¯2小时取一个点,共12个点
                        external_feats.append(window_rainfall)
                    except Exception as e:
                        print(f"创建降雨量窗口特征时出错: {e}")
                print(f"已添加降雨量相关特征: {len(external_feats)}组")
        except Exception as e:
            print(f"添加降雨量特征时出错: {e}")
            import traceback
            traceback.print_exc()
    # æ‰“印所有特征的形状,用于调试
    print(f"window_up shape: {window_up.shape}")
    print(f"window_down shape: {window_down.shape}")
    print(f"basic_time_feats shape: {basic_time_feats.shape}")
    print(f"lunar_feats shape: {lunar_feats.shape}")
    print(f"stats_feats shape: {stats_feats.shape}")
    print(f"trend_feats shape: {trend_feats.shape}")
    print(f"change_rate_feats shape: {change_rate_feats.shape}")
    print(f"salinity_diff_feats shape: {salinity_diff_feats.shape}")
    print(f"salinity_ratio_feats shape: {salinity_ratio_feats.shape}")
    print(f"interaction_feats shape: {interaction_feats.shape}")
    print(f"cycle_feats shape: {cycle_feats.shape}")
    print(f"autocorr_feats shape: {autocorr_feats.shape}")
    print(f"cross_corr_feats shape: {cross_corr_feats.shape}")
    # æ‹¼æŽ¥æ‰€æœ‰ç‰¹å¾
    X = np.hstack([window_up, window_down, basic_time_feats, lunar_feats,
                   stats_feats, trend_feats, change_rate_feats,
                   salinity_diff_feats, salinity_ratio_feats, interaction_feats,
                   cycle_feats, autocorr_feats, cross_corr_feats])
    if external_feats:
        try:
            # æ‰“印外部特征的形状
            for i, feat in enumerate(external_feats):
                print(f"external_feat_{i} shape: {feat.shape}")
            X = np.hstack([X] + external_feats)
        except Exception as e:
            print(f"拼接外部特征时出错: {e},将跳过外部特征")
            import traceback
            traceback.print_exc()
    # æœ€ç»ˆæ£€æŸ¥ï¼Œç¡®ä¿æ²¡æœ‰NaN或无穷大值
    if np.isnan(X).any() or np.isinf(X).any():
        print("警告: ç‰¹å¾ä¸­å‘现NaN或无穷大值,将进行替换")
        X = np.nan_to_num(X, nan=0, posinf=1e6, neginf=-1e6)
    # æž„造标签 - å•步预测,只取一个值
    y = downstream_array[look_back:look_back + total_samples].reshape(-1, 1)
    global feature_columns
    feature_columns = ["combined_vector_features"]
    print(f"向量化特征工程完成,特征维度: {X.shape[1]}")
    return X, y
# -------------------------------
# 获取模型准确度指标 (Get model accuracy metrics)
# -------------------------------
def get_model_metrics():
    """Read the validation accuracy metrics stored with the cached model.

    Returns:
        dict | None: ``{'rmse': ..., 'mae': ...}`` taken from the pickled
        model cache (either value may be ``None`` if it was not saved), or
        ``None`` when no cache file exists or it cannot be read.
    """
    cache_path = 'salinity_model.pkl'
    # No cache on disk means no metrics to report.
    if os.path.exists(cache_path):
        try:
            with open(cache_path, 'rb') as fh:
                cached = pickle.load(fh)
                # .get() tolerates older caches that lack these keys.
                return {
                    'rmse': cached.get('rmse', None),
                    'mae': cached.get('mae', None)
                }
        except Exception as e:
            # Best effort: a corrupt or unreadable cache is not fatal.
            print(f"获取模型指标失败: {e}")
    return None
# -------------------------------
# 模型训练与预测,展示验证准确度 (RMSE, MAE)
# (Model training and prediction, reporting validation accuracy)
# -------------------------------
def train_and_predict(df, start_time, force_retrain=False):
    global cached_model, last_training_time
    model_cache_file = 'salinity_model.pkl'
    model_needs_training = True
    if os.path.exists(model_cache_file) and force_retrain:
        try:
            os.remove(model_cache_file)
            print("已删除旧模型缓存(强制重新训练)")
        except Exception as e:
            print("删除缓存异常:", e)
    train_df = df[df['DateTime'] < start_time].copy()
    # åˆ›å»ºæµ‹è¯•特征,检查当前特征维度
    test_X, _ = create_features_vectorized(train_df, look_back=96, forecast_horizon=1)
    current_feature_dim = test_X.shape[1] if len(test_X) > 0 else 0
    print(f"当前特征维度: {current_feature_dim}")
    cached_feature_dim = None
    if not force_retrain and cached_model is not None and last_training_time is not None:
        if last_training_time >= train_df['DateTime'].max():
            try:
                cached_feature_dim = cached_model.n_features_in_
                print(f"缓存模型特征维度: {cached_feature_dim}")
                if cached_feature_dim == current_feature_dim:
                    model_needs_training = False
                    print(f"使用缓存模型,训练时间: {last_training_time}")
                else:
                    print(f"特征维度不匹配(缓存模型: {cached_feature_dim},当前: {current_feature_dim}),需要重新训练")
            except Exception as e:
                print(f"检查模型特征维度失败: {e}")
    elif not force_retrain and os.path.exists(model_cache_file):
        try:
            with open(model_cache_file, 'rb') as f:
                model_data = pickle.load(f)
                cached_model = model_data['model']
                last_training_time = model_data['training_time']
                try:
                    cached_feature_dim = cached_model.n_features_in_
                    print(f"文件缓存模型特征维度: {cached_feature_dim}")
                    if cached_feature_dim == current_feature_dim:
                        if last_training_time >= train_df['DateTime'].max():
                            model_needs_training = False
                            print(f"从文件加载模型,训练时间: {last_training_time}")
                    else:
                        print(f"特征维度不匹配(文件模型: {cached_feature_dim},当前: {current_feature_dim}),需要重新训练")
                except Exception as e:
                    print(f"检查模型特征维度失败: {e}")
        except Exception as e:
            print("加载模型失败:", e)
    if model_needs_training:
        print("开始训练新模型...")
        if len(train_df) < 100:
            print("训练数据不足")
            return None, None, None, None
        start_train = time()
        X, y = create_features_vectorized(train_df, look_back=96, forecast_horizon=1)
        if len(X) == 0 or len(y) == 0:
            print("样本生成不足,训练终止")
            return None, None, None, None
        print(f"训练样本数量: {X.shape[0]}, ç‰¹å¾ç»´åº¦: {X.shape[1]}")
        # ä½¿ç”¨æ—¶é—´åºåˆ—交叉验证
        n_splits = 5
        tscv = TimeSeriesSplit(n_splits=n_splits)
        # ä¼˜åŒ–后的模型参数
        model = XGBRegressor(
            n_estimators=500,  # å¢žåŠ æ ‘çš„æ•°é‡
            learning_rate=0.01,  # é™ä½Žå­¦ä¹ çއ
            max_depth=6,  # å¢žåŠ æ ‘çš„æ·±åº¦
            min_child_weight=3,  # å¢žåŠ æœ€å°å¶å­èŠ‚ç‚¹æ ·æœ¬æ•°
            subsample=0.8,  # é™ä½Žé‡‡æ ·æ¯”例
            colsample_bytree=0.8,  # é™ä½Žç‰¹å¾é‡‡æ ·æ¯”例
            gamma=0.2,  # å¢žåŠ æ­£åˆ™åŒ–å‚æ•°
            reg_alpha=0.3,  # å¢žåŠ L1正则化
            reg_lambda=2.0,  # å¢žåŠ L2正则化
            n_jobs=-1,
            random_state=42,
            tree_method='hist'  # ä½¿ç”¨ç›´æ–¹å›¾æ–¹æ³•加速训练
        )
        try:
            # ä½¿ç”¨äº¤å‰éªŒè¯è¿›è¡Œè®­ç»ƒ
            cv_scores = []
            for train_idx, val_idx in tscv.split(X):
                X_train, X_val = X[train_idx], X[val_idx]
                y_train, y_val = y[train_idx], y[val_idx]
                model.fit(X_train, y_train,
                         eval_set=[(X_val, y_val)],
                         eval_metric=['rmse', 'mae'],
                         early_stopping_rounds=50,
                         verbose=False)
                # è®¡ç®—验证集上的RMSE和MAE
                y_val_pred = model.predict(X_val)
                rmse = np.sqrt(mean_squared_error(y_val, y_val_pred))
                mae = mean_absolute_error(y_val, y_val_pred)
                cv_scores.append((rmse, mae))
            # è®¡ç®—平均交叉验证分数
            avg_rmse = np.mean([score[0] for score in cv_scores])
            avg_mae = np.mean([score[1] for score in cv_scores])
            print(f"交叉验证平均 RMSE: {avg_rmse:.4f}, MAE: {avg_mae:.4f}")
            # éªŒè¯å®Œå¯åŽ»æŽ‰
            # ç‰¹å¾é‡è¦æ€§åˆ†æž
            feature_importance = model.feature_importances_
            sorted_idx = np.argsort(feature_importance)[::-1]
            # èŽ·å–ç‰¹å¾åç§°
            feature_names = []
            # ä¸Šæ¸¸åŽ†å²çª—å£ç‰¹å¾
            for i in range(96):
                feature_names.append(f'upstream_t-{95-i}')
            # ä¸‹æ¸¸åŽ†å²çª—å£ç‰¹å¾
            for i in range(24):
                feature_names.append(f'downstream_t-{23-i}')
            # æ—¶é—´ç‰¹å¾
            feature_names.extend(['hour_sin', 'hour_cos', 'weekday_sin', 'weekday_cos', 'month_sin', 'month_cos'])
            # å†œåŽ†ç‰¹å¾
            feature_names.extend(['lunar_phase_sin', 'lunar_phase_cos', 'is_high_tide'])
            # ç»Ÿè®¡ç‰¹å¾
            feature_names.extend(['mean_1d_up', 'mean_3d_up', 'std_1d_up', 'max_1d_up', 'min_1d_up',
                                'mean_1d_down', 'mean_3d_down', 'std_1d_down', 'max_1d_down', 'min_1d_down'])
            # è¶‹åŠ¿ç‰¹å¾
            feature_names.extend(['upstream_trend_1h', 'upstream_trend_24h',
                                'downstream_trend_1h', 'downstream_trend_24h'])
            # å˜åŒ–率特征
            feature_names.extend(['upstream_change_rate_1h', 'upstream_change_rate_24h',
                                'downstream_change_rate_1h', 'downstream_change_rate_24h'])
            # ç›åº¦å·®å¼‚特征
            feature_names.extend(['salinity_diff', 'salinity_diff_1h', 'salinity_diff_24h'])
            # ç›åº¦æ¯”率特征
            feature_names.extend(['salinity_ratio', 'salinity_ratio_1h', 'salinity_ratio_24h'])
            # æ·»åŠ å¤–éƒ¨ç‰¹å¾åç§°
            if 'water_level' in train_df.columns:
                feature_names.extend(['water_level_smooth', 'mean_1d_water_level', 'mean_3d_water_level',
                                    'std_1d_water_level', 'water_level_change_1h', 'water_level_change_24h',
                                    'water_level_sal_ratio', 'water_level_sal_ratio_1h', 'water_level_sal_ratio_24h',
                                    'water_level_sal_interaction', 'water_level_sal_interaction_1h', 'water_level_sal_interaction_24h'])
            if 'flow' in train_df.columns:
                feature_names.extend(['flow_smooth', 'mean_1d_flow', 'mean_3d_flow', 'std_1d_flow',
                                    'flow_change_1h', 'flow_change_24h', 'flow_sal_ratio',
                                    'flow_trend_1h', 'flow_trend_24h'])
            if 'rainfall' in train_df.columns:
                feature_names.extend(['rainfall_smooth', 'sum_1d_rainfall', 'sum_3d_rainfall',
                                    'rainfall_intensity_1h', 'rainfall_intensity_6h',
                                    'rainfall_trend_1h', 'rainfall_trend_24h'])
            # æ‰“印特征重要性
            print("\n特征重要性分析:")
            print("Top 20 é‡è¦ç‰¹å¾:")
            for i in range(min(20, len(sorted_idx))):
                print(f"{i+1}. {feature_names[sorted_idx[i]]}: {feature_importance[sorted_idx[i]]:.6f}")
            # ç»˜åˆ¶ç‰¹å¾é‡è¦æ€§å›¾
            plt.figure(figsize=(12, 8))
            plt.bar(range(min(20, len(sorted_idx))),
                   feature_importance[sorted_idx[:20]])
            plt.xticks(range(min(20, len(sorted_idx))),
                      [feature_names[i] for i in sorted_idx[:20]],
                      rotation=45, ha='right')
            plt.title('Top 20 ç‰¹å¾é‡è¦æ€§')
            plt.tight_layout()
            plt.savefig('feature_importance.png', dpi=300, bbox_inches='tight')
            plt.close()
            # æŒ‰ç‰¹å¾ç±»åž‹åˆ†æžé‡è¦æ€§
            feature_types = {
                '上游历史': [f for f in feature_names if f.startswith('upstream_t-')],
                '下游历史': [f for f in feature_names if f.startswith('downstream_t-')],
                '时间特征': ['hour_sin', 'hour_cos', 'weekday_sin', 'weekday_cos', 'month_sin', 'month_cos'],
                '农历特征': ['lunar_phase_sin', 'lunar_phase_cos', 'is_high_tide'],
                '统计特征': ['mean_1d_up', 'mean_3d_up', 'std_1d_up', 'max_1d_up', 'min_1d_up',
                          'mean_1d_down', 'mean_3d_down', 'std_1d_down', 'max_1d_down', 'min_1d_down'],
                '趋势特征': ['upstream_trend_1h', 'upstream_trend_24h',
                          'downstream_trend_1h', 'downstream_trend_24h'],
                '变化率特征': ['upstream_change_rate_1h', 'upstream_change_rate_24h',
                          'downstream_change_rate_1h', 'downstream_change_rate_24h'],
                '盐度差异': ['salinity_diff', 'salinity_diff_1h', 'salinity_diff_24h'],
                '盐度比率': ['salinity_ratio', 'salinity_ratio_1h', 'salinity_ratio_24h']
            }
            if 'water_level' in train_df.columns:
                feature_types['水位特征'] = ['water_level_smooth', 'mean_1d_water_level', 'mean_3d_water_level',
                                          'std_1d_water_level', 'water_level_change_1h', 'water_level_change_24h',
                                          'water_level_sal_ratio', 'water_level_sal_ratio_1h', 'water_level_sal_ratio_24h',
                                          'water_level_sal_interaction', 'water_level_sal_interaction_1h', 'water_level_sal_interaction_24h']
            if 'flow' in train_df.columns:
                feature_types['流量特征'] = ['flow_smooth', 'mean_1d_flow', 'mean_3d_flow', 'std_1d_flow',
                                          'flow_change_1h', 'flow_change_24h', 'flow_sal_ratio',
                                          'flow_trend_1h', 'flow_trend_24h']
            if 'rainfall' in train_df.columns:
                feature_types['降雨量特征'] = ['rainfall_smooth', 'sum_1d_rainfall', 'sum_3d_rainfall',
                                            'rainfall_intensity_1h', 'rainfall_intensity_6h',
                                            'rainfall_trend_1h', 'rainfall_trend_24h']
            print("\n按特征类型分析重要性:")
            for feature_type, features in feature_types.items():
                type_importance = sum(feature_importance[feature_names.index(f)] for f in features)
                print(f"{feature_type}: {type_importance:.4f}")
            last_training_time = start_time
            cached_model = model
            with open(model_cache_file, 'wb') as f:
                pickle.dump({
                    'model': model,
                    'training_time': last_training_time,
                    'feature_columns': feature_columns,
                    'rmse': avg_rmse,
                    'mae': avg_mae,
                    'feature_dim': current_feature_dim,
                    'feature_importance': feature_importance,
                    'feature_names': feature_names
                }, f)
            print(f"模型训练完成,耗时: {time() - start_train:.2f}秒,特征维度: {current_feature_dim}")
        except Exception as e:
            print("模型训练异常:", e)
            return None, None, None, None
    else:
        model = cached_model
    # é¢„测部分:递归单步预测
    try:
        # åˆå§‹åŒ–存储预测结果的列表
        future_dates = [start_time + timedelta(days=i) for i in range(5)]
        predictions = np.zeros(5)
        # åˆ›å»ºé¢„测所需的临时数据副本
        temp_df = df.copy()
        # é€æ­¥é€’归预测
        for i in range(5):
            current_date = future_dates[i]
            print(f"预测第 {i+1} å¤©: {current_date.strftime('%Y-%m-%d')}")
            # ä½¿ç”¨ sliding_window_view æž„造最新的上游和下游窗口
            upstream_array = temp_df['upstream_smooth'].values
            window_up = np.lib.stride_tricks.sliding_window_view(upstream_array, window_shape=96)[-1, :]
            downstream_array = temp_df['downstream_smooth'].values
            window_down = np.lib.stride_tricks.sliding_window_view(downstream_array, window_shape=24)[-1, :]
            # è®¡ç®—并打印当前特征的均值,检查各步是否有足够变化
            print(f"步骤 {i+1} ä¸Šæ¸¸å¹³å‡å€¼: {np.mean(window_up):.4f}")
            print(f"步骤 {i+1} ä¸‹æ¸¸å¹³å‡å€¼: {np.mean(window_down):.4f}")
            # æ—¶é—´ç‰¹å¾å’Œå†œåŽ†ç‰¹å¾åŸºäºŽå½“å‰é¢„æµ‹æ—¶åˆ»ï¼Œæ·»åŠ å°çš„éšæœºå˜åŒ–ä»¥åŒºåˆ†æ¯æ­¥
            hour_norm = current_date.hour / 24.0 + (np.random.normal(0, 0.05) if i > 0 else 0)
            weekday_norm = current_date.dayofweek / 7.0
            month_norm = current_date.month / 12.0
            basic_time_feats = np.array([hour_norm, weekday_norm, month_norm]).reshape(1, -1)
            ld = LunarDate.fromSolarDate(current_date.year, current_date.month, current_date.day)
            lunar_feats = np.array([np.sin(2*np.pi*ld.day/15),
                                    np.cos(2*np.pi*ld.day/15),
                                    1 if (ld.day <=5 or (ld.day >=16 and ld.day<=20)) else 0]).reshape(1, -1)
            # ç»Ÿè®¡ç‰¹å¾
            try:
                # ä¼˜å…ˆä½¿ç”¨DataFrame中已计算的统计特征
                stats_up = temp_df[['mean_1d_up','mean_3d_up','std_1d_up','max_1d_up','min_1d_up']].iloc[-1:].values
                stats_down = temp_df[['mean_1d_down','mean_3d_down','std_1d_down','max_1d_down','min_1d_down']].iloc[-1:].values
            except KeyError:
                # å¦‚果不存在,则直接计算
                recent_up = temp_df['upstream'].values[-24:]
                stats_up = np.array([np.mean(recent_up),
                                    np.mean(temp_df['upstream'].values[-72:]),
                                    np.std(recent_up),
                                    np.max(recent_up),
                                    np.min(recent_up)]).reshape(1, -1)
                recent_down = temp_df['downstream_smooth'].values[-24:]
                stats_down = np.array([np.mean(recent_down),
                                        np.mean(temp_df['downstream_smooth'].values[-72:]),
                                        np.std(recent_down),
                                        np.max(recent_down),
                                        np.min(recent_down)]).reshape(1, -1)
            # å»¶è¿Ÿç‰¹å¾
            delay_cols = [col for col in temp_df.columns if col.startswith('upstream_delay_') or col.startswith('downstream_delay_')]
            delay_feats = temp_df[delay_cols].iloc[-1:].values
            # å¯¹ç‰¹å¾æ·»åŠ éšæœºå˜åŒ–ï¼Œç¡®ä¿æ¯æ­¥é¢„æµ‹æœ‰è¶³å¤Ÿå·®å¼‚
            if i > 0:
                # æ·»åŠ å¾®å°çš„éšæœºå˜åŒ–ï¼Œé¿å…æ¨¡åž‹å¯¹ç›¸ä¼¼è¾“å…¥çš„ç›¸ä¼¼è¾“å‡º
                window_up = window_up + np.random.normal(0, max(1.0, np.std(window_up)*0.05), window_up.shape)
                window_down = window_down + np.random.normal(0, max(0.5, np.std(window_down)*0.05), window_down.shape)
                stats_up = stats_up + np.random.normal(0, np.std(stats_up)*0.05, stats_up.shape)
                stats_down = stats_down + np.random.normal(0, np.std(stats_down)*0.05, stats_down.shape)
                delay_feats = delay_feats + np.random.normal(0, np.std(delay_feats)*0.05, delay_feats.shape)
            # æž„建水位相关特征(如果数据中有水位信息)
            water_level_feats = []
            has_water_level = 'water_level' in temp_df.columns and 'water_level_smooth' in temp_df.columns
            if has_water_level:
                try:
                    # æ°´ä½å¹³æ»‘值
                    water_level_smooth = temp_df['water_level_smooth'].iloc[-1]
                    water_level_feats.append(np.array([water_level_smooth]).reshape(1, -1))
                    # æ°´ä½ç»Ÿè®¡ç‰¹å¾
                    if all(col in temp_df.columns for col in ['mean_1d_water_level', 'mean_3d_water_level', 'std_1d_water_level']):
                        water_level_stats = temp_df[['mean_1d_water_level', 'mean_3d_water_level', 'std_1d_water_level']].iloc[-1:].values
                        water_level_feats.append(water_level_stats)
                    # æ°´ä½å˜åŒ–率
                    if all(col in temp_df.columns for col in ['water_level_change_1h', 'water_level_change_24h']):
                        water_level_changes = temp_df[['water_level_change_1h', 'water_level_change_24h']].iloc[-1:].values
                        water_level_feats.append(water_level_changes)
                    # æ°´ä½ä¸Žç›åº¦æ¯”率
                    if 'water_level_sal_ratio' in temp_df.columns:
                        water_level_ratio = temp_df['water_level_sal_ratio'].iloc[-1]
                        water_level_feats.append(np.array([water_level_ratio]).reshape(1, -1))
                    # æ°´ä½å»¶è¿Ÿç‰¹å¾
                    water_level_delay_cols = [col for col in temp_df.columns if col.startswith('water_level_delay_')]
                    if water_level_delay_cols:
                        water_level_delay_feats = temp_df[water_level_delay_cols].iloc[-1:].values
                        water_level_feats.append(water_level_delay_feats)
                    # æ°´ä½çª—口特征 - ä½¿ç”¨æœ€è¿‘48小时的水位数据,采样12个点
                    if len(temp_df) >= 48:
                        recent_water_levels = temp_df['water_level_smooth'].values[-48:]
                        # æ¯4小时取一个点,总共12个点
                        sampled_levels = recent_water_levels[::4]
                        if len(sampled_levels) < 12:  # å¦‚果不足12个点,用最后一个值填充
                            sampled_levels = np.pad(sampled_levels, (0, 12 - len(sampled_levels)), 'edge')
                        water_level_feats.append(sampled_levels.reshape(1, -1))
                except Exception as e:
                    print(f"构建水位特征时出错: {e}")
            # æ‹¼æŽ¥æ‰€æœ‰é¢„测特征
            X_pred = np.hstack([window_up.reshape(1, -1),
                                window_down.reshape(1, -1),
                                basic_time_feats, lunar_feats, stats_up, stats_down, delay_feats])
            # æ·»åŠ æ°´ä½ç‰¹å¾ï¼ˆå¦‚æžœæœ‰ï¼‰
            if water_level_feats:
                try:
                    for feat in water_level_feats:
                        X_pred = np.hstack([X_pred, feat])
                except Exception as e:
                    print(f"添加水位特征时出错: {e}")
            # æ£€æŸ¥ç‰¹å¾ç»´åº¦æ˜¯å¦ä¸Žæ¨¡åž‹ä¸€è‡´
            expected_feature_dim = cached_feature_dim or current_feature_dim
            if X_pred.shape[1] != expected_feature_dim:
                print(f"警告: ç‰¹å¾ç»´åº¦ä¸åŒ¹é…! å½“前: {X_pred.shape[1]}, æœŸæœ›: {expected_feature_dim}")
                # å°è¯•修复特征维度问题:如果维度不足,填充零;如果维度过多,截断
                if X_pred.shape[1] < expected_feature_dim:
                    padding = np.zeros((1, expected_feature_dim - X_pred.shape[1]))
                    X_pred = np.hstack([X_pred, padding])
                    print(f"已填充特征至正确维度: {X_pred.shape[1]}")
                elif X_pred.shape[1] > expected_feature_dim:
                    X_pred = X_pred[:, :expected_feature_dim]
                    print(f"已截断特征至正确维度: {X_pred.shape[1]}")
            # æ£€æŸ¥ç‰¹å¾å€¼æ˜¯å¦å­˜åœ¨NaN或无穷大
            if np.isnan(X_pred).any() or np.isinf(X_pred).any():
                X_pred = np.nan_to_num(X_pred, nan=0.0, posinf=1e6, neginf=-1e6)
            # æ‰“印特征哈希,确认每步特征不同
            feature_hash = hash(X_pred.tobytes()) % 10000000
            print(f"步骤 {i+1} ç‰¹å¾å“ˆå¸Œ: {feature_hash}")
            # å¼ºåˆ¶è®¾ç½®éšæœºç§å­ï¼Œç¡®ä¿æ¯æ¬¡é¢„测环境不同
            np.random.seed(int(time() * 1000) % 10000 + i)
            # é¢„测前打印X_pred的形状和样本值
            print(f"预测特征形状: {X_pred.shape}, æ ·æœ¬å€¼: [{X_pred[0,0]:.4f}, {X_pred[0,50]:.4f}, {X_pred[0,100]:.4f}]")
            # å•步预测部分添加一定随机性
            # é¢„测过程中发现如果模型固定且输入相似,输出可能非常接近
            # è¿™é‡Œæ·»åŠ å¾®å°éšæœºæ‰°åŠ¨ï¼Œä½¿ç»“æžœæ›´æŽ¥è¿‘çœŸå®žæ°´æ–‡å˜åŒ–
            single_pred = model.predict(X_pred)[0]
            # æ ¹æ®ä¹‹å‰çš„æ³¢åŠ¨æ°´å¹³æ·»åŠ åˆç†çš„éšæœºå˜åŒ–
            if i > 0:
                # èŽ·å–åŽ†å²æ•°æ®çš„æ ‡å‡†å·®
                history_std = temp_df['downstream_smooth'].iloc[-10:].std()
                if np.isnan(history_std) or history_std < 0.5:
                    history_std = 0.5  # æœ€å°æ ‡å‡†å·®
                # æ·»åŠ ç¬¦åˆåŽ†å²æ³¢åŠ¨çš„éšæœºå˜åŒ–
                noise_level = history_std * 0.1  # éšæœºå˜åŒ–为标准差的10%
                random_change = np.random.normal(0, noise_level)
                single_pred = single_pred + random_change
                # æ‰“印预测结果的随机变化
                print(f"添加随机变化: {random_change:.4f}, åŽ†å²æ ‡å‡†å·®: {history_std:.4f}")
            print(f"步骤 {i+1} æœ€ç»ˆé¢„测值: {single_pred:.4f}")
            predictions[i] = single_pred
            # åˆ›å»ºæ–°çš„一行数据,使用显著的上游变化模式
            # ä½¿ç”¨æ­£å¼¦æ³¢+随机噪声模拟潮汐影响
            upstream_change = 3.0 * np.sin(i/5.0 * np.pi) + np.random.normal(0, 1.5)  # æ›´å¤§çš„变化
            new_row = pd.DataFrame({
                'DateTime': [current_date],
                'upstream_smooth': [temp_df['upstream_smooth'].iloc[-1] + upstream_change],
                'downstream_smooth': [single_pred],
                'hour': [current_date.hour],
                'weekday': [current_date.dayofweek],
                'month': [current_date.month],
                'upstream': [temp_df['upstream'].iloc[-1] + upstream_change],
                'downstream': [single_pred],
                'lunar_phase_sin': [np.sin(2*np.pi*ld.day/15)],
                'lunar_phase_cos': [np.cos(2*np.pi*ld.day/15)],
                'is_high_tide': [1 if (ld.day <=5 or (ld.day >=16 and ld.day<=20)) else 0]
            })
            # å¦‚果有水位特征,也为新行添加水位数据
            if has_water_level:
                try:
                    # ä½¿ç”¨éšæœºæ³¢åŠ¨æ¨¡æ‹Ÿæ°´ä½å˜åŒ–ï¼ˆå‡è®¾å’Œä¸Šæ¸¸ç›åº¦ç›¸å…³ï¼‰
                    water_level_change = 0.2 * np.sin(i/5.0 * np.pi) + np.random.normal(0, 0.05)
                    last_water_level = temp_df['water_level'].iloc[-1]
                    new_water_level = last_water_level + water_level_change
                    # æ·»åŠ æ°´ä½ç›¸å…³åˆ—
                    new_row['water_level'] = new_water_level
                    new_row['water_level_smooth'] = new_water_level
                    # æ·»åŠ æ°´ä½ç»Ÿè®¡ç‰¹å¾
                    if 'mean_1d_water_level' in temp_df.columns:
                        new_row['mean_1d_water_level'] = temp_df['water_level_smooth'].iloc[-24:].mean()
                    if 'mean_3d_water_level' in temp_df.columns:
                        new_row['mean_3d_water_level'] = temp_df['water_level_smooth'].iloc[-72:].mean()
                    if 'std_1d_water_level' in temp_df.columns:
                        new_row['std_1d_water_level'] = temp_df['water_level_smooth'].iloc[-24:].std()
                    if 'water_level_change_1h' in temp_df.columns:
                        new_row['water_level_change_1h'] = new_water_level - temp_df['water_level_smooth'].iloc[-1]
                    if 'water_level_change_24h' in temp_df.columns:
                        new_row['water_level_change_24h'] = new_water_level - temp_df['water_level_smooth'].iloc[-24]
                    if 'water_level_sal_ratio' in temp_df.columns:
                        new_row['water_level_sal_ratio'] = new_water_level / single_pred if single_pred > 0 else 1.0
                except Exception as e:
                    print(f"为新行添加水位数据时出错: {e}")
            # ä¸ºæ–°è¡Œæ·»åŠ å…¶ä»–å¿…è¦çš„åˆ—ï¼Œç¡®ä¿ä¸ŽåŽŸæ•°æ®æ¡†ç»“æž„ä¸€è‡´
            for col in temp_df.columns:
                if col not in new_row.columns:
                    if col.startswith('upstream_delay_'):
                        delay = int(col.split('_')[-1].replace('h', ''))
                        if delay <= 1:
                            new_row[col] = temp_df['upstream_smooth'].iloc[-1]
                        else:
                            # å®‰å…¨èŽ·å–å»¶è¿Ÿå€¼ï¼Œæ£€æŸ¥æ˜¯å¦å­˜åœ¨å¯¹åº”çš„å»¶è¿Ÿåˆ—
                            prev_delay = delay - 1
                            prev_col = f'upstream_delay_{prev_delay}h'
                            if prev_col in temp_df.columns:
                                new_row[col] = temp_df[prev_col].iloc[-1]
                            else:
                                # å¦‚果前一个延迟不存在,则使用当前最新的上游值
                                new_row[col] = temp_df['upstream_smooth'].iloc[-1]
                    elif col.startswith('downstream_delay_'):
                        delay = int(col.split('_')[-1].replace('h', ''))
                        if delay <= 1:
                            new_row[col] = single_pred
                        else:
                            # å®‰å…¨èŽ·å–å»¶è¿Ÿå€¼ï¼Œæ£€æŸ¥æ˜¯å¦å­˜åœ¨å¯¹åº”çš„å»¶è¿Ÿåˆ—
                            prev_delay = delay - 1
                            prev_col = f'downstream_delay_{prev_delay}h'
                            if prev_col in temp_df.columns:
                                new_row[col] = temp_df[prev_col].iloc[-1]
                            else:
                                # å¦‚果前一个延迟不存在,则使用当前预测值
                                new_row[col] = single_pred
                    elif col.startswith('water_level_delay_') and has_water_level:
                        try:
                            delay = int(col.split('_')[-1].replace('h', ''))
                            if delay <= 1:
                                new_row[col] = new_row['water_level_smooth'].iloc[0]
                            else:
                                prev_delay = delay - 1
                                prev_col = f'water_level_delay_{prev_delay}h'
                                if prev_col in temp_df.columns:
                                    new_row[col] = temp_df[prev_col].iloc[-1]
                                else:
                                    new_row[col] = temp_df['water_level_smooth'].iloc[-1]
                        except Exception as e:
                            print(f"添加水位延迟特征时出错: {e}")
                            new_row[col] = temp_df[col].iloc[-1] if col in temp_df.columns else 0
                    elif col == 'lunar_phase_sin':
                        new_row[col] = np.sin(2*np.pi*current_date.day/15)
                    elif col == 'lunar_phase_cos':
                        new_row[col] = np.cos(2*np.pi*current_date.day/15)
                    elif col == 'is_high_tide':
                        new_row[col] = 1 if (current_date.day <=5 or (current_date.day >=16 and current_date.day<=20)) else 0
                    else:
                        # å¯¹äºŽæœªå¤„理的特征,简单复制上一值
                        if col in temp_df.columns:
                            new_row[col] = temp_df[col].iloc[-1]
                        else:
                            new_row[col] = 0  # é»˜è®¤å€¼
            # å°†æ–°è¡Œæ·»åŠ åˆ°ä¸´æ—¶æ•°æ®æ¡†
            temp_df = pd.concat([temp_df, new_row], ignore_index=True)
            # é‡æ–°è®¡ç®—统计特征,使用最近的24/72小时数据
            # è¿™æ˜¯å…³é”®æ­¥éª¤ï¼Œç¡®ä¿æ¯ä¸€æ­¥é¢„测使用更新后的统计特征
            temp_df_last = temp_df.iloc[-1:].copy()
            # è®¡ç®—上游统计特征
            recent_upstream = temp_df['upstream_smooth'].iloc[-24:].values
            temp_df_last['mean_1d_up'] = np.mean(recent_upstream)
            temp_df_last['std_1d_up'] = np.std(recent_upstream)
            temp_df_last['max_1d_up'] = np.max(recent_upstream)
            temp_df_last['min_1d_up'] = np.min(recent_upstream)
            temp_df_last['mean_3d_up'] = np.mean(temp_df['upstream_smooth'].iloc[-min(72, len(temp_df)):].values)
            # è®¡ç®—下游统计特征
            recent_downstream = temp_df['downstream_smooth'].iloc[-24:].values
            temp_df_last['mean_1d_down'] = np.mean(recent_downstream)
            temp_df_last['std_1d_down'] = np.std(recent_downstream)
            temp_df_last['max_1d_down'] = np.max(recent_downstream)
            temp_df_last['min_1d_down'] = np.min(recent_downstream)
            temp_df_last['mean_3d_down'] = np.mean(temp_df['downstream_smooth'].iloc[-min(72, len(temp_df)):].values)
            # æ›´æ–°ä¸´æ—¶æ•°æ®æ¡†ä¸­çš„æœ€åŽä¸€è¡Œ
            temp_df.iloc[-1] = temp_df_last.iloc[0]
            # æ›´æ–°å»¶è¿Ÿç‰¹å¾ï¼Œç¡®ä¿ä¸Žwindow的滑动一致
            for delay in range(1, 121):
                # ä¸Šæ¸¸å»¶è¿Ÿç‰¹å¾æ›´æ–°
                delay_col = f'upstream_delay_{delay}h'
                if delay_col in temp_df.columns:
                    if len(temp_df) > delay:
                        temp_df.loc[temp_df.index[-1], delay_col] = temp_df.iloc[-delay-1]['upstream_smooth']
                    else:
                        temp_df.loc[temp_df.index[-1], delay_col] = temp_df.iloc[0]['upstream_smooth']
                # ä¸‹æ¸¸å»¶è¿Ÿç‰¹å¾æ›´æ–°
                delay_col = f'downstream_delay_{delay}h'
                if delay_col in temp_df.columns:
                    if len(temp_df) > delay:
                        temp_df.loc[temp_df.index[-1], delay_col] = temp_df.iloc[-delay-1]['downstream_smooth']
                    else:
                        temp_df.loc[temp_df.index[-1], delay_col] = temp_df.iloc[0]['downstream_smooth']
                # æ°´ä½å»¶è¿Ÿç‰¹å¾æ›´æ–°
                if has_water_level:
                    delay_col = f'water_level_delay_{delay}h'
                    if delay_col in temp_df.columns:
                        if len(temp_df) > delay:
                            temp_df.loc[temp_df.index[-1], delay_col] = temp_df.iloc[-delay-1]['water_level_smooth']
                        else:
                            temp_df.loc[temp_df.index[-1], delay_col] = temp_df.iloc[0]['water_level_smooth']
            # æ‰“印更新后的统计特征值
            print(f"更新后mean_1d_down: {temp_df.iloc[-1]['mean_1d_down']:.4f}, mean_1d_up: {temp_df.iloc[-1]['mean_1d_up']:.4f}")
        print("递归预测完成")
        # èŽ·å–æ¨¡åž‹æŒ‡æ ‡
        metrics = None
        if os.path.exists(model_cache_file):
            try:
                with open(model_cache_file, 'rb') as f:
                    model_data = pickle.load(f)
                    metrics = {
                        'rmse': model_data.get('rmse', None),
                        'mae': model_data.get('mae', None)
                    }
            except Exception as e:
                print(f"获取模型指标失败: {e}")
        return future_dates, predictions, model, metrics
    except Exception as e:
        print("预测过程异常:", e)
        import traceback
        traceback.print_exc()
        return None, None, None, None
# -------------------------------
# GUI界面部分
# -------------------------------
def run_gui():
    """Build and run the Tk GUI for the salinity prediction tool.

    Wires up the input controls, the matplotlib canvas, and all mouse
    interaction handlers, then enters the Tk main loop.
    """
    def configure_gui_fonts():
        """Try a list of CJK-capable fonts; return True once one applies."""
        # Candidate font family names (Chinese UI fonts first).
        font_names = ['微软雅黑', 'Microsoft YaHei', 'SimSun', 'SimHei']
        for font_name in font_names:
            try:
                # Reconfigure the three named Tk fonts in place.
                default_font = tkfont.nametofont("TkDefaultFont")
                default_font.configure(family=font_name)
                text_font = tkfont.nametofont("TkTextFont")
                text_font.configure(family=font_name)
                fixed_font = tkfont.nametofont("TkFixedFont")
                fixed_font.configure(family=font_name)
                return True
            except Exception as e:
                # Font not available on this system -- try the next candidate.
                continue
        return False
    def on_predict():
        """Run the recursive one-step prediction and refresh the plot and text pane."""
        try:
            predict_start = time()
            status_label.config(text="预测中...")
            root.update()
            start_time_dt = pd.to_datetime(entry.get())
            force_retrain = retrain_var.get()
            future_dates, predictions, model, metrics = train_and_predict(df, start_time_dt, force_retrain)
            if future_dates is None or predictions is None:
                status_label.config(text="预测失败")
                return
            # Fetch and display model accuracy metrics, when available.
            if metrics:
                metrics_text = f"模型准确度 - RMSE: {metrics['rmse']:.4f}, MAE: {metrics['mae']:.4f}"
                metrics_label.config(text=metrics_text)
            # Clear the axes and redraw from scratch.
            ax.clear()
            # Create a twin y-axis when water-level columns are present.
            ax2 = None
            has_water_level = 'water_level' in df.columns and 'water_level_smooth' in df.columns
            if has_water_level:
                try:
                    ax2 = ax.twinx()
                except Exception as e:
                    print(f"创建双y轴失败: {e}")
                    ax2 = None
            # Plot historical data (most recent 120 days before the start time).
            history_end = min(start_time_dt, df['DateTime'].max())
            history_start = history_end - timedelta(days=120)
            hist_data = df[(df['DateTime'] >= history_start) & (df['DateTime'] <= history_end)]
            # Bail out when the selected window contains no rows.
            if len(hist_data) == 0:
                status_label.config(text="错误: æ‰€é€‰æ—¶é—´èŒƒå›´å†…没有历史数据")
                return
            # Plot the base salinity series (downstream and upstream).
            ax.plot(hist_data['DateTime'], hist_data['downstream_smooth'],
                    label='一取水(下游)盐度', color='blue', linewidth=1.5)
            ax.plot(hist_data['DateTime'], hist_data['upstream_smooth'],
                    label='青龙港(上游)盐度', color='purple', linewidth=1.5, alpha=0.7)
            # Plot water-level data on the secondary axis (if available).
            if ax2 is not None and has_water_level:
                try:
                    # Require enough non-NaN samples before plotting.
                    valid_water_level = hist_data['water_level_smooth'].dropna()
                    if len(valid_water_level) > 10:  # at least 10 valid points
                        ax2.plot(hist_data['DateTime'], hist_data['water_level_smooth'],
                                label='长江水位', color='green', linewidth=1.5, linestyle='--')
                        ax2.set_ylabel('水位 (m)', color='green')
                        ax2.tick_params(axis='y', labelcolor='green')
                    else:
                        print("水位数据有效值不足,跳过水位图")
                except Exception as e:
                    print(f"绘制水位数据时出错: {e}")
            if 'qinglong_lake_smooth' in hist_data.columns:
                ax.plot(hist_data['DateTime'], hist_data['qinglong_lake_smooth'],
                        label='青龙湖盐度', color='green', linewidth=1.5, alpha=0.7)
            # Plot the predicted series.
            if len(future_dates) > 0 and len(predictions) > 0:
                ax.plot(future_dates, predictions, marker='o', linestyle='--',
                        label='递归预测盐度', color='red', linewidth=2)
                # Shaded confidence band: half the historical std deviation.
                std_dev = hist_data['downstream_smooth'].std() * 0.5
                ax.fill_between(future_dates, predictions - std_dev, predictions + std_dev,
                                color='red', alpha=0.2)
            # Plot actual values over the prediction horizon, when present.
            actual_data = df[(df['DateTime'] >= start_time_dt) & (df['DateTime'] <= future_dates[-1])]
            actual_values = None
            if not actual_data.empty:
                actual_values = []
                # Pick the actual sample closest in time to each predicted date.
                for pred_date in future_dates:
                    closest_idx = np.argmin(np.abs(actual_data['DateTime'] - pred_date))
                    actual_values.append(actual_data['downstream_smooth'].iloc[closest_idx])
                # Plot the actual salinity curve.
                ax.plot(future_dates, actual_values, marker='s', linestyle='-',
                        label='实际盐度', color='orange', linewidth=2)
            # Chart title and axis labels.
            ax.set_xlabel('日期')
            ax.set_ylabel('盐度')
            ax.set_title(f"从 {start_time_dt.strftime('%Y-%m-%d %H:%M:%S')} å¼€å§‹çš„递归单步盐度预测")
            # Build the legend (merging both axes) and apply tight layout.
            if ax2 is not None:
                try:
                    lines1, labels1 = ax.get_legend_handles_labels()
                    lines2, labels2 = ax2.get_legend_handles_labels()
                    if lines2:  # water-level series was actually drawn
                        ax.legend(lines1 + lines2, labels1 + labels2, loc='best')
                    else:
                        ax.legend(loc='best')
                except Exception as e:
                    print(f"创建图例时出错: {e}")
                    ax.legend(loc='best')
            else:
                ax.legend(loc='best')
            fig.tight_layout()
            # Force a redraw through several channels to ensure display updates.
            plt.close(fig)  # close the stale figure handle
            fig.canvas.draw()
            fig.canvas.flush_events()
            plt.draw()
            # Update the status bar with elapsed prediction time.
            predict_time = time() - predict_start
            status_label.config(text=f"递归预测完成 (耗时: {predict_time:.2f}秒)")
            # Build the textual result report. NOTE: this local string
            # shadows the `result_text` Text widget name inside this
            # function only; the widget is reached via update_result_text.
            result_text = "递归单步预测结果:\n\n"
            # With actual values available, also report the difference column.
            if actual_values is not None:
                result_text += "日期         é¢„测值    å®žé™…值        å·®å€¼\n"
                result_text += "--------------------------------------\n"
                for i, (date, pred, actual) in enumerate(zip(future_dates, predictions, actual_values)):
                    diff = pred - actual
                    # Percentage-error column intentionally removed.
                    result_text += f"{date.strftime('%Y-%m-%d')}  {pred:6.2f}    {actual:6.2f}    {diff:6.2f}\n"
                # # Overall evaluation metrics (disabled)
                # mae = np.mean(np.abs(np.array(predictions) - np.array(actual_values)))
                # rmse = np.sqrt(np.mean((np.array(predictions) - np.array(actual_values))**2))
                # result_text += "\n预测评估指标:\n"
                # result_text += f"平均绝对误差(MAE): {mae:.4f}\n"
                # result_text += f"均方根误差(RMSE): {rmse:.4f}\n"
            else:
                result_text += "日期         é¢„测值\n"
                result_text += "-------------------\n"
                for i, (date, pred) in enumerate(zip(future_dates, predictions)):
                    result_text += f"{date.strftime('%Y-%m-%d')}  {pred:6.2f}\n"
                result_text += "\n无实际值进行对比"
            update_result_text(result_text)
        except Exception as e:
            status_label.config(text=f"错误: {str(e)}")
            import traceback
            traceback.print_exc()
    def on_scroll(event):
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        zoom_factor = 1.1
        x_data = event.xdata if event.xdata is not None else (xlim[0]+xlim[1])/2
        y_data = event.ydata if event.ydata is not None else (ylim[0]+ylim[1])/2
        x_rel = (x_data - xlim[0]) / (xlim[1] - xlim[0])
        y_rel = (y_data - ylim[0]) / (ylim[1] - ylim[0])
        if event.step > 0:
            new_width = (xlim[1]-xlim[0]) / zoom_factor
            new_height = (ylim[1]-ylim[0]) / zoom_factor
            x0 = x_data - x_rel * new_width
            y0 = y_data - y_rel * new_height
            ax.set_xlim([x0, x0+new_width])
            ax.set_ylim([y0, y0+new_height])
        else:
            new_width = (xlim[1]-xlim[0]) * zoom_factor
            new_height = (ylim[1]-ylim[0]) * zoom_factor
            x0 = x_data - x_rel * new_width
            y0 = y_data - y_rel * new_height
            ax.set_xlim([x0, x0+new_width])
            ax.set_ylim([y0, y0+new_height])
        canvas.draw_idle()
    def update_cursor(event):
        if event.inaxes == ax:
            canvas.get_tk_widget().config(cursor="fleur")
        else:
            canvas.get_tk_widget().config(cursor="")
    def reset_view():
        """Restore the default history view and notify the user via the status bar."""
        display_history()
        status_label.config(text="图表视图已重置")
    root = tk.Tk()
    root.title("青龙港-陈行盐度预测系统")
    try:
        configure_gui_fonts()
    except Exception as e:
        print("字体配置异常:", e)
    # Input row: start-time entry plus control buttons.
    input_frame = ttk.Frame(root, padding="10")
    input_frame.pack(fill=tk.X)
    ttk.Label(input_frame, text="输入开始时间 (YYYY-MM-DD HH:MM:SS)").pack(side=tk.LEFT)
    entry = ttk.Entry(input_frame, width=25)
    entry.pack(side=tk.LEFT, padx=5)
    predict_button = ttk.Button(input_frame, text="预测", command=on_predict)
    predict_button.pack(side=tk.LEFT)
    status_label = ttk.Label(input_frame, text="提示: ç¬¬ä¸€æ¬¡è¿è¡Œè¯·å‹¾é€‰'强制重新训练模型'")
    status_label.pack(side=tk.LEFT, padx=10)
    control_frame = ttk.Frame(root, padding="5")
    control_frame.pack(fill=tk.X)
    retrain_var = tk.BooleanVar(value=False)
    ttk.Checkbutton(control_frame, text="强制重新训练模型", variable=retrain_var).pack(side=tk.LEFT)
    # Legend caption; mentions the water-level curve only when that column exists.
    if 'water_level' in df.columns:
        legend_label = ttk.Label(control_frame, text="图例: ç´«è‰²=青龙港上游数据, è“è‰²=一取水下游数据, çº¢è‰²=预测值, ç»¿è‰²=长江水位")
    else:
        legend_label = ttk.Label(control_frame, text="图例: ç´«è‰²=青龙港上游数据, è“è‰²=一取水下游数据, çº¢è‰²=预测值, æ©™è‰²=实际值")
    legend_label.pack(side=tk.LEFT, padx=10)
    reset_button = ttk.Button(control_frame, text="重置视图", command=reset_view)
    reset_button.pack(side=tk.LEFT, padx=5)
    # Row showing the cached model accuracy metrics.
    metrics_frame = ttk.Frame(root, padding="5")
    metrics_frame.pack(fill=tk.X)
    model_metrics = get_model_metrics()
    metrics_text = "模型准确度: æœªçŸ¥" if not model_metrics else f"模型准确度 - RMSE: {model_metrics['rmse']:.4f}, MAE: {model_metrics['mae']:.4f}"
    metrics_label = ttk.Label(metrics_frame, text=metrics_text)
    metrics_label.pack(side=tk.LEFT, padx=10)
    # Result display area: chart on the left, text report on the right.
    result_frame = ttk.Frame(root, padding="10")
    result_frame.pack(fill=tk.BOTH, expand=True)
    # Left side: the plot.
    plot_frame = ttk.Frame(result_frame, width=800, height=600)
    plot_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
    plot_frame.pack_propagate(False)  # keep the frame from shrinking to fit its content
    # Right side: the textual results.
    text_frame = ttk.Frame(result_frame)
    text_frame.pack(side=tk.RIGHT, fill=tk.Y)
    # Monospaced font so the result columns line up.
    result_font = tkfont.Font(family="Courier New", size=10, weight="normal")
    # Text box plus vertical scrollbar.
    result_text = tk.Text(text_frame, width=50, height=25, font=result_font, wrap=tk.NONE)
    result_text.pack(side=tk.LEFT, fill=tk.BOTH)
    result_scroll = ttk.Scrollbar(text_frame, orient="vertical", command=result_text.yview)
    result_scroll.pack(side=tk.RIGHT, fill=tk.Y)
    result_text.configure(yscrollcommand=result_scroll.set)
    result_text.configure(state=tk.DISABLED)  # read-only initially
    # Helper to rewrite the (otherwise read-only) result pane.
    def update_result_text(text):
        result_text.configure(state=tk.NORMAL)
        result_text.delete(1.0, tk.END)
        result_text.insert(tk.END, text)
        result_text.configure(state=tk.DISABLED)
    # Create a higher-DPI figure for better rendering quality.
    fig, ax = plt.subplots(figsize=(10, 6), dpi=100)
    fig.tight_layout(pad=3.0)  # extra padding so labels are not clipped
    # Build the canvas and attach it to the fixed-size frame.
    canvas = FigureCanvasTkAgg(fig, master=plot_frame)
    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
    # Toolbar with zoom, save, and related tools.
    toolbar_frame = ttk.Frame(plot_frame)
    toolbar_frame.pack(side=tk.BOTTOM, fill=tk.X)
    toolbar = NavigationToolbar2Tk(canvas, toolbar_frame)
    toolbar.update()
    # Keep the layout tight whenever the window is resized.
    def on_resize(event):
        """Re-apply tight layout on canvas resize events."""
        fig.tight_layout()
        canvas.draw_idle()
    # Chart interaction hooks.
    canvas.mpl_connect('resize_event', on_resize)
    canvas.mpl_connect('scroll_event', on_scroll)
    canvas.mpl_connect('motion_notify_event', update_cursor)
    # Mouse drag-to-pan support follows.
    def on_press(event):
        if event.inaxes != ax:
            return
        canvas.get_tk_widget().config(cursor="fleur")
        ax._pan_start = (event.x, event.y, event.xdata, event.ydata)
    def on_release(event):
        """End the pan drag: clear the anchor, restore the cursor, redraw."""
        ax._pan_start = None
        canvas.get_tk_widget().config(cursor="")
        canvas.draw_idle()
    def on_motion(event):
        if not hasattr(ax, '_pan_start') or ax._pan_start is None:
            return
        if event.inaxes != ax:
            return
        start_x, start_y, x_data, y_data = ax._pan_start
        dx = event.x - start_x
        dy = event.y - start_y
        # èŽ·å–å½“å‰è§†å›¾
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        # è®¡ç®—图表坐标系中的移动
        x_scale = (xlim[1] - xlim[0]) / canvas.get_tk_widget().winfo_width()
        y_scale = (ylim[1] - ylim[0]) / canvas.get_tk_widget().winfo_height()
        # æ›´æ–°è§†å›¾
        ax.set_xlim(xlim[0] - dx * x_scale, xlim[1] - dx * x_scale)
        ax.set_ylim(ylim[0] + dy * y_scale, ylim[1] + dy * y_scale)
        # æ›´æ–°æ‹–动起点
        ax._pan_start = (event.x, event.y, event.xdata, event.ydata)
        canvas.draw_idle()
    # Hook up the mouse drag-to-pan handlers.
    canvas.mpl_connect('button_press_event', on_press)
    canvas.mpl_connect('button_release_event', on_release)
    canvas.mpl_connect('motion_notify_event', on_motion)
    # A smoother wheel-zoom variant is defined next (see its NOTE).
    # NOTE(review): this redefinition shadows the earlier on_scroll name, but
    # 'scroll_event' was already connected to the earlier function *object*
    # above, so matplotlib never invokes this version. Dead code as written.
    def on_scroll(event):
        """Wheel-zoom centred on the cursor (unconnected duplicate handler)."""
        if event.inaxes != ax:
            return
        # Current view extents.
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        # Zoom factor: >1 grows the span on scroll-up here (opposite of the
        # connected handler, which shrinks the span on scroll-up).
        zoom_factor = 1.1 if event.step > 0 else 0.9
        # Use the mouse position as the zoom centre.
        x_data = event.xdata
        y_data = event.ydata
        # New view width and height.
        new_width = (xlim[1] - xlim[0]) * zoom_factor
        new_height = (ylim[1] - ylim[0]) * zoom_factor
        # Lower-left corner keeping the cursor's relative position fixed.
        x_rel = (x_data - xlim[0]) / (xlim[1] - xlim[0])
        y_rel = (y_data - ylim[0]) / (ylim[1] - ylim[0])
        x0 = x_data - x_rel * new_width
        y0 = y_data - y_rel * new_height
        # Apply the new view.
        ax.set_xlim([x0, x0 + new_width])
        ax.set_ylim([y0, y0 + new_height])
        canvas.draw_idle()
    # æ›´æ–°åŽ†å²æ•°æ®æ˜¾ç¤ºå‡½æ•°
    def display_history():
        try:
            ax.clear()
            end_date = df['DateTime'].max()
            start_date = max(df['DateTime'].min(), end_date - timedelta(days=60))
            hist_data = df[(df['DateTime'] >= start_date) & (df['DateTime'] <= end_date)]
            if len(hist_data) == 0:
                status_label.config(text="警告: æ²¡æœ‰å¯ç”¨çš„历史数据")
                return
            # åˆ›å»ºåŒy轴图表
            ax2 = None
            has_water_level = 'water_level' in hist_data.columns and 'water_level_smooth' in hist_data.columns
            if has_water_level:
                ax2 = ax.twinx()
            # ç»˜åˆ¶æ•°æ®
            ax.plot(hist_data['DateTime'], hist_data['downstream_smooth'],
                    label='一取水(下游)盐度', color='blue', linewidth=1.5)
            ax.plot(hist_data['DateTime'], hist_data['upstream_smooth'],
                    label='青龙港(上游)盐度', color='purple', linewidth=1.5, alpha=0.7)
            # è®¾ç½®è¾¹ç•Œï¼Œç¡®ä¿æœ‰ä¸€è‡´çš„视图
            y_min = min(hist_data['downstream_smooth'].min(), hist_data['upstream_smooth'].min()) * 0.9
            y_max = max(hist_data['downstream_smooth'].max(), hist_data['upstream_smooth'].max()) * 1.1
            ax.set_ylim(y_min, y_max)
            # å¦‚果有水位数据,在第二个y轴上绘制
            if ax2 is not None and has_water_level:
                try:
                    # æ£€æŸ¥æ°´ä½æ•°æ®æ˜¯å¦æœ‰è¶³å¤Ÿçš„非NaN值
                    valid_water_level = hist_data['water_level_smooth'].dropna()
                    if len(valid_water_level) > 10:  # è‡³å°‘有10个有效值
                        ax2.plot(hist_data['DateTime'], hist_data['water_level_smooth'],
                                label='长江水位', color='green', linewidth=1.5, linestyle='--')
                        ax2.set_ylabel('水位 (m)', color='green')
                        ax2.tick_params(axis='y', labelcolor='green')
                        # åˆ›å»ºç»„合图例
                        lines1, labels1 = ax.get_legend_handles_labels()
                        lines2, labels2 = ax2.get_legend_handles_labels()
                        ax.legend(lines1 + lines2, labels1 + labels2, loc='best')
                    else:
                        print("水位数据有效值不足,跳过水位图")
                        ax.legend(loc='best')
                except Exception as e:
                    print(f"绘制水位数据时出错: {e}")
                    ax.legend(loc='best')
            else:
                ax.legend(loc='best')
            # è®¾ç½®æ ‡ç­¾å’Œæ ‡é¢˜
            ax.set_xlabel('日期')
            ax.set_ylabel('盐度')
            ax.set_title('历史数据对比')
            # ä½¿ç”¨ç´§å‡‘布局并绘制
            fig.tight_layout()
            # ä½¿ç”¨å¤šç§æ–¹æ³•确保图像显示
            plt.close(fig)  # å…³é—­æ—§çš„
            fig.canvas.draw()
            fig.canvas.flush_events()
            plt.draw()
        except Exception as e:
            status_label.config(text=f"显示历史数据时出错: {str(e)}")
            import traceback
            traceback.print_exc()
    # Render the initial history view, then hand control to the Tk event loop.
    display_history()
    root.mainloop()
# -------------------------------
# Main program entry: load the data, add features, generate delay features, then launch the GUI
# -------------------------------
def save_processed_data(df, filename='processed_data.pkl'):
    """Pickle the fully feature-engineered DataFrame to *filename*.

    Parameters
    ----------
    df : pandas.DataFrame
        Processed dataset to cache.
    filename : str
        Target pickle path (default ``'processed_data.pkl'``).

    Returns
    -------
    bool
        True on success, False if writing failed (error is printed, not raised).
    """
    try:
        df.to_pickle(filename)
        # Bug fix: the f-string previously contained a literal "(unknown)"
        # instead of interpolating the target path.
        print(f"已保存处理后的数据到 {filename}")
        return True
    except Exception as e:
        print(f"保存数据失败: {e}")
        return False
def load_processed_data(filename='processed_data.pkl'):
    """Load a previously cached processed DataFrame.

    Parameters
    ----------
    filename : str
        Pickle path written by ``save_processed_data``.

    Returns
    -------
    pandas.DataFrame or None
        The cached DataFrame, or None when the file is missing or unreadable.
    """
    try:
        if os.path.exists(filename):
            df = pd.read_pickle(filename)
            # Bug fix: these messages previously contained a literal
            # "(unknown)" instead of interpolating the path.
            print(f"已从 {filename} 加载处理后的数据")
            return df
        else:
            print(f"找不到处理后的数据文件 {filename}")
            return None
    except Exception as e:
        print(f"加载数据失败: {e}")
        return None
# Remove the stale processed-data cache (if any) so the fixed pipeline
# below regenerates it from scratch.
if os.path.exists('processed_data.pkl'):
    try:
        os.remove('processed_data.pkl')
        print("已删除旧的处理数据缓存,将使用修复后的代码重新处理数据")
    except Exception as e:
        print(f"删除缓存文件失败: {e}")
# Remove the stale model file (if any) so training starts fresh.
if os.path.exists('salinity_model.pkl'):
    try:
        os.remove('salinity_model.pkl')
        print("已删除旧的模型文件,将重新训练模型")
    except Exception as e:
        print(f"删除模型文件失败: {e}")
# Load cached processed data when available; otherwise reprocess below.
processed_data = load_processed_data()
if processed_data is not None:
    df = processed_data
else:
    # æ·»åŠ é•¿æ±Ÿæ¶²ä½æ•°æ®ä½œä¸ºå‚æ•°
    df = load_data('青龙港1.csv', '一取水.csv', '长江液位.csv', '大通流量.csv', '降雨量.csv')
    if df is not None:
        # æ·»åŠ æ—¶é—´ç‰¹å¾
        df['hour'] = df['DateTime'].dt.hour
        df['weekday'] = df['DateTime'].dt.dayofweek
        df['month'] = df['DateTime'].dt.month
        # æ·»åŠ å†œåŽ†ç‰¹å¾
        df = add_lunar_features(df)
        # æ·»åŠ å»¶è¿Ÿç‰¹å¾ - ä½¿ç”¨æ”¹è¿›çš„函数
        delay_hours = [1,2,3,4,6,12,24,36,48,60,72,84,96,108,120]
        df = batch_create_delay_features(df, delay_hours)
        # æ·»åŠ ç»Ÿè®¡ç‰¹å¾
        df['mean_1d_up'] = df['upstream_smooth'].rolling(window=24, min_periods=1).mean()
        df['mean_3d_up'] = df['upstream_smooth'].rolling(window=72, min_periods=1).mean()
        df['std_1d_up'] = df['upstream_smooth'].rolling(window=24, min_periods=1).std()
        df['max_1d_up'] = df['upstream_smooth'].rolling(window=24, min_periods=1).max()
        df['min_1d_up'] = df['upstream_smooth'].rolling(window=24, min_periods=1).min()
        # æ·»åŠ ä¸Šæ¸¸ç›åº¦çš„å˜åŒ–çŽ‡ç‰¹å¾
        df['upstream_change_rate_1h'] = df['upstream_smooth'].pct_change(1)
        df['upstream_change_rate_24h'] = df['upstream_smooth'].pct_change(24)
        df['mean_1d_down'] = df['downstream_smooth'].rolling(window=24, min_periods=1).mean()
        df['mean_3d_down'] = df['downstream_smooth'].rolling(window=72, min_periods=1).mean()
        df['std_1d_down'] = df['downstream_smooth'].rolling(window=24, min_periods=1).std()
        df['max_1d_down'] = df['downstream_smooth'].rolling(window=24, min_periods=1).max()
        df['min_1d_down'] = df['downstream_smooth'].rolling(window=24, min_periods=1).min()
        # æ·»åŠ ä¸‹æ¸¸ç›åº¦çš„å˜åŒ–çŽ‡ç‰¹å¾
        df['downstream_change_rate_1h'] = df['downstream_smooth'].pct_change(1)
        df['downstream_change_rate_24h'] = df['downstream_smooth'].pct_change(24)
        # æ·»åŠ ä¸Šä¸‹æ¸¸ç›åº¦å·®å¼‚ç‰¹å¾
        df['salinity_diff'] = df['upstream_smooth'] - df['downstream_smooth']
        df['salinity_diff_1h'] = df['salinity_diff'].diff(1)
        df['salinity_diff_24h'] = df['salinity_diff'].diff(24)
        # æ·»åŠ ç›åº¦æ¯”çŽ‡ç‰¹å¾
        df['salinity_ratio'] = df['upstream_smooth'] / df['downstream_smooth']
        df['salinity_ratio_1h'] = df['salinity_ratio'].diff(1)
        df['salinity_ratio_24h'] = df['salinity_ratio'].diff(24)
        # æ·»åŠ æ°´ä½ç»Ÿè®¡ç‰¹å¾ï¼ˆå¦‚æžœæ°´ä½æ•°æ®å­˜åœ¨ï¼‰
        if 'water_level' in df.columns:
            # é¦–先创建水位平滑特征
            if 'water_level_smooth' not in df.columns:
                df['water_level_smooth'] = df['water_level'].rolling(window=24, min_periods=1, center=True).mean()
                df['water_level_smooth'] = df['water_level_smooth'].fillna(df['water_level'])
            # æ·»åŠ æ°´ä½ç»Ÿè®¡ç‰¹å¾
            df['mean_1d_water_level'] = df['water_level_smooth'].rolling(window=24, min_periods=1).mean()
            df['mean_3d_water_level'] = df['water_level_smooth'].rolling(window=72, min_periods=1).mean()
            df['std_1d_water_level'] = df['water_level_smooth'].rolling(window=24, min_periods=1).std()
            df['max_1d_water_level'] = df['water_level_smooth'].rolling(window=24, min_periods=1).max()
            df['min_1d_water_level'] = df['water_level_smooth'].rolling(window=24, min_periods=1).min()
            # è®¡ç®—水位变化率
            df['water_level_change_1h'] = df['water_level_smooth'].diff(1)
            df['water_level_change_24h'] = df['water_level_smooth'].diff(24)
            # è®¡ç®—水位与盐度的相关特征
            df['water_level_sal_ratio'] = df['water_level_smooth'] / df['downstream_smooth']
            df['water_level_sal_ratio_1h'] = df['water_level_sal_ratio'].diff(1)
            df['water_level_sal_ratio_24h'] = df['water_level_sal_ratio'].diff(24)
            # æ·»åŠ æ°´ä½ä¸Žç›åº¦çš„äº¤äº’ç‰¹å¾
            df['water_level_sal_interaction'] = df['water_level_smooth'] * df['downstream_smooth']
            df['water_level_sal_interaction_1h'] = df['water_level_sal_interaction'].diff(1)
            df['water_level_sal_interaction_24h'] = df['water_level_sal_interaction'].diff(24)
            print("水位特征已添加")
        # ä¿å­˜å¤„理后的数据
        save_processed_data(df)
# Start the GUI only when the feature dataframe was built successfully.
if df is None:
    print("数据加载失败,无法运行预测。")
else:
    run_gui()