**Outline of model training**

**default**

The default configuration (implemented in the code block further below):

- 4 different locations
- epochs=200
- batch_size=64
- callbacks=[checkpoint, early_stopping(patience=5), reduce_lr(factor=0.1, patience=5, min_lr=1e-6)]
- learning_rate=0.001
- LSTM layer with 128 units
- LSTM output layer

**testing**

Parameters and layers varied during testing (an illustrative sketch of how such variants can be built is given after the default training code below):

- LSTM units: 128, 64, 32
- Dense units: 128, 64, 32
- Bidirectional LSTM units: 128, 64
- batch_size: 128, 64, 32
- 2 stacked LSTM layers
- learning rate
- dropout layer
- patience
- combinations of the parameters that gave better results

The default training function and model:

```
import os

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.layers import Input, LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam


def train_model(model, X_train, y_train, X_test, y_test):
    epochs = 200
    batch_size = 64

    # Stop training once the validation loss has not improved for 5 epochs
    early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)

    # Learning rate scheduler callback
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-6)

    # Save the weights of the best model (lowest validation loss) seen so far
    os.makedirs("checkpoints", exist_ok=True)
    checkpoint_path = "checkpoints/model_epoch_{epoch:02d}_val_loss_{val_loss:.2f}.weights.h5"
    checkpoint = ModelCheckpoint(
        filepath=checkpoint_path, save_weights_only=True,
        monitor='val_loss', mode='min', save_best_only=True, verbose=1
    )

    history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,
                        validation_data=(X_test, y_test),
                        callbacks=[checkpoint, early_stopping, reduce_lr])
    return model, history


# Default model; sequence_length and feature_count come from the data preparation step
model1 = Sequential([
    Input(shape=(sequence_length, feature_count)),
    LSTM(128, return_sequences=True),
    LSTM(sequence_length)  # Ensure the output shape matches y
])

optimizer = Adam(learning_rate=0.001, clipvalue=1.0)
model1.compile(optimizer=optimizer, loss='mse')  # loss not stated in the notes; MSE assumed
```
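
The default model is then presumably trained with a call along these lines; `X_train`, `y_train`, `X_test`, `y_test` come from the data-preparation step (not shown here), and the name `history1` is only illustrative:

```
# Train the default model with the callbacks defined above;
# returns the fitted model and the Keras History object
model1, history1 = train_model(model1, X_train, y_train, X_test, y_test)
```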
**testing of different parameters and layers**
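
As an illustration only (not the exact code used in the experiments), the variants listed under **testing** could be generated from a single parameterized constructor. The function `build_variant`, its argument names, and the example values such as `dropout_rate=0.2` are assumptions for this sketch; `sequence_length`, `feature_count`, the data arrays, and `train_model` are reused from above:

```
from tensorflow.keras.layers import Bidirectional, Dense, Dropout, Input, LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam


def build_variant(lstm_units=128, dense_units=None, bidirectional=False,
                  extra_lstm_layer=False, dropout_rate=0.0, learning_rate=0.001):
    """Build one model variant for the parameter tests (illustrative sketch only)."""
    model = Sequential()
    model.add(Input(shape=(sequence_length, feature_count)))

    # First recurrent layer: plain or bidirectional, with the tested unit count
    first_lstm = LSTM(lstm_units, return_sequences=True)
    model.add(Bidirectional(first_lstm) if bidirectional else first_lstm)

    # Optional second stacked LSTM layer ("2 stacked LSTM layers" test)
    if extra_lstm_layer:
        model.add(LSTM(lstm_units, return_sequences=True))

    # Optional dropout layer test
    if dropout_rate > 0.0:
        model.add(Dropout(dropout_rate))

    # Optional Dense layer test (applied per time step)
    if dense_units:
        model.add(Dense(dense_units, activation='relu'))

    # Output layer as in the default model
    model.add(LSTM(sequence_length))

    model.compile(optimizer=Adam(learning_rate=learning_rate, clipvalue=1.0), loss='mse')
    return model


# Example: one hypothetical combination (bidirectional, 64 units, dropout 0.2)
model_bidir_64 = build_variant(lstm_units=64, bidirectional=True, dropout_rate=0.2)
model_bidir_64, history = train_model(model_bidir_64, X_train, y_train, X_test, y_test)
```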