optimizer = Adam(learning_rate=0.001, clipvalue=1.0)  # clip each gradient element to [-1, 1] to curb exploding gradients
```
**Testing different parameters and layers**

**After further testing and debugging, I found this setup to give good correlation and RMSE values. I will keep testing and train a good model for all locations.**
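The training loop below expects `all_sequences` to hold one `(X_train, y_train, X_test, y_test)` tuple per location. That construction isn't shown in this section, so here is a hypothetical sliding-window sketch; `make_sequences`, `location_frames`, the first-feature target, and the 80/20 split are my assumptions, not the original code:

```
import numpy as np

# Hypothetical sliding-window split: each input window of all features
# predicts the next window of the first feature.
def make_sequences(values, sequence_length):
    X, y = [], []
    for i in range(len(values) - 2 * sequence_length + 1):
        X.append(values[i : i + sequence_length])                           # input window
        y.append(values[i + sequence_length : i + 2 * sequence_length, 0])  # target window
    return np.array(X), np.array(y)

all_sequences = []
for df in location_frames:  # assumed: one DataFrame per location
    values = df.values.astype("float32")
    X, y = make_sequences(values, sequence_length)
    split = int(0.8 * len(X))  # chronological 80/20 split
    all_sequences.append((X[:split], y[:split], X[split:], y[split:]))
```

With the data in that shape, the training setup is: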
```
import os

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.layers import Bidirectional, Input, LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam


def train_model(model, X_train, y_train, X_test, y_test):
    epochs = 200
    batch_size = 32

    # Stop once val_loss stalls for 10 epochs and roll back to the best weights
    early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

    # Learning rate scheduler callback
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, min_lr=1e-6)

    # Save the best weights seen so far; create the target directory if needed
    os.makedirs("checkpoints", exist_ok=True)
    checkpoint_path = "checkpoints/model_epoch_{epoch:02d}_val_loss_{val_loss:.2f}.weights.h5"
    checkpoint = ModelCheckpoint(
        filepath=checkpoint_path, save_weights_only=True,
        monitor='val_loss', mode='min', save_best_only=True, verbose=1
    )

    history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,
                        validation_data=(X_test, y_test),
                        callbacks=[checkpoint, early_stopping, reduce_lr])
    return model, history


# sequence_length and feature_count are defined earlier in the post
modelyyx = Sequential([
    Input(shape=(sequence_length, feature_count)),
    Bidirectional(LSTM(128, return_sequences=True)),
    LSTM(sequence_length)  # Ensure the output shape matches y
])

histories = []
for X_train, y_train, X_test, y_test in all_sequences:
    optimizer = Adam(learning_rate=0.001, clipvalue=1.0)
    modelyyx.compile(optimizer=optimizer, loss='mse')
    print("NEW LOCATION")
    modelyyx, history = train_model(modelyyx, X_train, y_train, X_test, y_test)
    histories.append(history)  # record each location's training history
```
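The correlation and RMSE values mentioned above aren't computed in this snippet. A minimal per-location evaluation sketch, assuming the trained `modelyyx` and the same `all_sequences`; the `scipy.stats.pearsonr` call and the flattening are my choices:

```
import numpy as np
from scipy.stats import pearsonr

# Evaluate the shared model on each location's held-out split
for X_train, y_train, X_test, y_test in all_sequences:
    y_pred = modelyyx.predict(X_test, verbose=0)
    rmse = np.sqrt(np.mean((y_test - y_pred) ** 2))
    corr, _ = pearsonr(y_test.ravel(), y_pred.ravel())  # Pearson r over flattened targets
    print(f"RMSE: {rmse:.4f}  correlation: {corr:.4f}")
```

One design note: the training loop fine-tunes a single shared model sequentially across locations rather than fitting a fresh model per location, so each location starts from weights shaped by the previous ones.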