PyTorch Forecasting example

Solution on MaxInterview for "pytorch forecasting example", by the best coders in the world.

Showing results for: "pytorch forecasting example"
Contributed by Delphine
24 Jan 2019
# Train a Temporal Fusion Transformer (TFT) with pytorch-forecasting and
# PyTorch Lightning: build a TimeSeriesDataSet, find a learning rate, fit.
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor

from pytorch_forecasting import TimeSeriesDataSet, TemporalFusionTransformer
# FIX: QuantileLoss is used below but was never imported (NameError in the original).
from pytorch_forecasting.metrics import QuantileLoss

# load data
# Expected: a pandas DataFrame with a monotonically increasing integer time
# index column, a target column, and one or more group-id columns — TODO fill in.
data = ...

# define dataset
max_encoder_length = 36     # number of past time steps the encoder sees
max_prediction_length = 6   # forecast horizon (decoder length)
training_cutoff = "YYYY-MM-DD"  # day for cutoff

training = TimeSeriesDataSet(
    data[lambda x: x.date < training_cutoff],
    time_idx= ...,
    target= ...,
    # weight="weight",
    group_ids=[ ... ],
    # FIX: the TimeSeriesDataSet keyword is `max_encoder_length`;
    # the original `max_encode_length` raises a TypeError.
    max_encoder_length=max_encoder_length,
    max_prediction_length=max_prediction_length,
    static_categoricals=[ ... ],
    static_reals=[ ... ],
    time_varying_known_categoricals=[ ... ],
    time_varying_known_reals=[ ... ],
    time_varying_unknown_categoricals=[ ... ],
    time_varying_unknown_reals=[ ... ],
)

# create validation and training dataset
# Validation reuses the training dataset's schema/normalization; predictions
# start right after the last time index seen in training.
validation = TimeSeriesDataSet.from_dataset(
    training, data, min_prediction_idx=training.index.time.max() + 1, stop_randomization=True
)
batch_size = 128
train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=2)
val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=2)

# define trainer with early stopping
early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=1, verbose=False, mode="min")
lr_logger = LearningRateMonitor()
trainer = pl.Trainer(
    max_epochs=100,
    gpus=0,                   # CPU training; set e.g. gpus=1 for a single GPU
    gradient_clip_val=0.1,    # clip gradients to stabilize TFT training
    limit_train_batches=30,   # cap batches per epoch for a quick demo run
    callbacks=[lr_logger, early_stop_callback],
)

# create the model
tft = TemporalFusionTransformer.from_dataset(
    training,
    learning_rate=0.03,
    hidden_size=32,
    attention_head_size=1,
    dropout=0.1,
    hidden_continuous_size=16,
    output_size=7,            # 7 outputs -> the 7 default quantiles of QuantileLoss
    loss=QuantileLoss(),
    log_interval=2,
    reduce_on_plateau_patience=4,
)
print(f"Number of parameters in network: {tft.size()/1e3:.1f}k")

# find optimal learning rate (set limit_train_batches to 1.0 and log_interval = -1)
res = trainer.tuner.lr_find(
    tft, train_dataloader=train_dataloader, val_dataloaders=val_dataloader, early_stop_threshold=1000.0, max_lr=0.3,
)

print(f"suggested learning rate: {res.suggestion()}")
fig = res.plot(show=True, suggest=True)
fig.show()

# fit the model
trainer.fit(
    tft, train_dataloader=train_dataloader, val_dataloaders=val_dataloader,
)
75