# demo.py — tensorboardX demo: log scalars, images, audio, text, histograms,
# PR curves and embeddings, then export scalars to JSON.
# (Fix: stripped the line-number residue that had been fused onto every
# source line, e.g. "3import torch", which made the file unparseable.)

import torch
import torchvision.utils as vutils
import numpy as np
import torchvision.models as models
from torchvision import datasets
from tensorboardX import SummaryWriter

# Untrained ResNet-18 (pretrained=False): used only as a convenient source of
# named parameter tensors to log as histograms.
resnet18 = models.resnet18(False)
writer = SummaryWriter()  # event files go under ./runs/<timestamp> by default
sample_rate = 44100  # audio sample rate, Hz
# One tone per group of 10 iterations (indexed by n_iter // 10):
# C4 D4 E4 F4 G4, then A4 repeated.
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
# Main demo loop: one logging step per iteration.
for n_iter in range(100):

    dummy_s1 = torch.rand(1)
    dummy_s2 = torch.rand(1)
    # Tags share the 'data/' prefix so TensorBoard groups them in one pane.
    writer.add_scalar('data/scalar1', dummy_s1[0], n_iter)
    writer.add_scalar('data/scalar2', dummy_s2[0], n_iter)

    # Several named curves drawn on a single chart.
    writer.add_scalars('data/scalar_group', {'xsinx': n_iter * np.sin(n_iter),
                                             'xcosx': n_iter * np.cos(n_iter),
                                             'arctanx': np.arctan(n_iter)}, n_iter)

    dummy_img = torch.rand(32, 3, 64, 64)  # output from network
    if n_iter % 10 == 0:
        # make_grid tiles the batch into one 3xHxW image; normalize/scale_each
        # rescales every image to [0, 1] independently.
        x = vutils.make_grid(dummy_img, normalize=True, scale_each=True)
        writer.add_image('Image', x, n_iter)

    # Two seconds of a pure cosine tone; amplitude stays within [-1, 1].
    dummy_audio = torch.zeros(sample_rate * 2)
    # BUG FIX: the original iterated range(x.size(0)); x is the image grid
    # from the branch above, whose size(0) is 3 (channels), so only the
    # first 3 of the 88200 samples were ever filled (and the loop leaned on
    # x leaking out of the if-branch). Iterate the audio buffer itself.
    for i in range(dummy_audio.size(0)):
        dummy_audio[i] = np.cos(freqs[n_iter // 10] * np.pi * float(i) / float(sample_rate))
    writer.add_audio('myAudio', dummy_audio, n_iter, sample_rate=sample_rate)

    writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)

    # Histogram of every parameter tensor of the (untrained) ResNet-18.
    for name, param in resnet18.named_parameters():
        writer.add_histogram(name, param.clone().cpu().data.numpy(), n_iter)

    # needs tensorboard 0.4RC or later
    writer.add_pr_curve('xoxo', np.random.randint(2, size=100), np.random.rand(100), n_iter)
# Embedding demo: project the first 100 MNIST test digits (with labels and
# thumbnail images) into TensorBoard's projector.
dataset = datasets.MNIST('mnist', train=False, download=True)
# NOTE(review): test_data/test_labels are deprecated aliases of .data/.targets
# in torchvision; kept for compatibility with older versions — verify against
# the installed torchvision before upgrading.
images = dataset.test_data[:100].float()
label = dataset.test_labels[:100]

features = images.view(100, 784)  # flatten 28x28 images into 784-dim vectors
# label_img wants NxCxHxW, hence the channel dim added by unsqueeze(1).
writer.add_embedding(features, metadata=label, label_img=images.unsqueeze(1))

# export scalar data to JSON for external processing
writer.export_scalars_to_json("./all_scalars.json")
writer.close()
56
# To display the results, run the following in the terminal
# ("runs" can be any directory in which your tf.events files are saved):
#     tensorboard --logdir runs
# demo.py — tensorboardX demo: log scalars, images, audio, text, histograms,
# PR curves and embeddings, then export scalars to JSON.
# NOTE(review): this file contains the demo script twice; this second copy
# re-runs the whole demo — confirm whether the duplication is intentional.
# (Fix: stripped the line-number residue fused onto every source line.)

import torch
import torchvision.utils as vutils
import numpy as np
import torchvision.models as models
from torchvision import datasets
from tensorboardX import SummaryWriter

# Untrained ResNet-18 (pretrained=False): used only as a convenient source of
# named parameter tensors to log as histograms.
resnet18 = models.resnet18(False)
writer = SummaryWriter()  # event files go under ./runs/<timestamp> by default
sample_rate = 44100  # audio sample rate, Hz
# One tone per group of 10 iterations (indexed by n_iter // 10):
# C4 D4 E4 F4 G4, then A4 repeated.
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
# Main demo loop: one logging step per iteration.
for n_iter in range(100):

    dummy_s1 = torch.rand(1)
    dummy_s2 = torch.rand(1)
    # Tags share the 'data/' prefix so TensorBoard groups them in one pane.
    writer.add_scalar('data/scalar1', dummy_s1[0], n_iter)
    writer.add_scalar('data/scalar2', dummy_s2[0], n_iter)

    # Several named curves drawn on a single chart.
    writer.add_scalars('data/scalar_group', {'xsinx': n_iter * np.sin(n_iter),
                                             'xcosx': n_iter * np.cos(n_iter),
                                             'arctanx': np.arctan(n_iter)}, n_iter)

    dummy_img = torch.rand(32, 3, 64, 64)  # output from network
    if n_iter % 10 == 0:
        # make_grid tiles the batch into one 3xHxW image; normalize/scale_each
        # rescales every image to [0, 1] independently.
        x = vutils.make_grid(dummy_img, normalize=True, scale_each=True)
        writer.add_image('Image', x, n_iter)

    # Two seconds of a pure cosine tone; amplitude stays within [-1, 1].
    dummy_audio = torch.zeros(sample_rate * 2)
    # BUG FIX: the original iterated range(x.size(0)); x is the image grid
    # from the branch above, whose size(0) is 3 (channels), so only the
    # first 3 of the 88200 samples were ever filled (and the loop leaned on
    # x leaking out of the if-branch). Iterate the audio buffer itself.
    for i in range(dummy_audio.size(0)):
        dummy_audio[i] = np.cos(freqs[n_iter // 10] * np.pi * float(i) / float(sample_rate))
    writer.add_audio('myAudio', dummy_audio, n_iter, sample_rate=sample_rate)

    writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)

    # Histogram of every parameter tensor of the (untrained) ResNet-18.
    for name, param in resnet18.named_parameters():
        writer.add_histogram(name, param.clone().cpu().data.numpy(), n_iter)

    # needs tensorboard 0.4RC or later
    writer.add_pr_curve('xoxo', np.random.randint(2, size=100), np.random.rand(100), n_iter)
# Embedding demo: project the first 100 MNIST test digits (with labels and
# thumbnail images) into TensorBoard's projector.
dataset = datasets.MNIST('mnist', train=False, download=True)
# NOTE(review): test_data/test_labels are deprecated aliases of .data/.targets
# in torchvision; kept for compatibility with older versions — verify against
# the installed torchvision before upgrading.
images = dataset.test_data[:100].float()
label = dataset.test_labels[:100]

features = images.view(100, 784)  # flatten 28x28 images into 784-dim vectors
# label_img wants NxCxHxW, hence the channel dim added by unsqueeze(1).
writer.add_embedding(features, metadata=label, label_img=images.unsqueeze(1))

# export scalar data to JSON for external processing
writer.export_scalars_to_json("./all_scalars.json")
writer.close()