Naive Bayes classifier calculating sigma

Teo
24 Jan 2019
# Naive Bayes On The Iris Dataset
from csv import reader
from random import seed
from random import randrange
from math import sqrt
from math import exp
from math import pi

# Load a CSV file
def load_csv(filename):
	dataset = list()
	with open(filename, 'r') as file:
		csv_reader = reader(file)
		for row in csv_reader:
			if not row:
				continue
			dataset.append(row)
	return dataset

# Convert string column to float
def str_column_to_float(dataset, column):
	for row in dataset:
		row[column] = float(row[column].strip())

# Convert string column to integer
def str_column_to_int(dataset, column):
	class_values = [row[column] for row in dataset]
	unique = set(class_values)
	lookup = dict()
	for i, value in enumerate(unique):
		lookup[value] = i
	for row in dataset:
		row[column] = lookup[row[column]]
	return lookup

# Split a dataset into k folds
def cross_validation_split(dataset, n_folds):
	dataset_split = list()
	dataset_copy = list(dataset)
	fold_size = int(len(dataset) / n_folds)
	for _ in range(n_folds):
		fold = list()
		while len(fold) < fold_size:
			index = randrange(len(dataset_copy))
			fold.append(dataset_copy.pop(index))
		dataset_split.append(fold)
	return dataset_split

# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
	correct = 0
	for i in range(len(actual)):
		if actual[i] == predicted[i]:
			correct += 1
	return correct / float(len(actual)) * 100.0

# Evaluate an algorithm using a cross validation split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
	folds = cross_validation_split(dataset, n_folds)
	scores = list()
	for fold in folds:
		train_set = list(folds)
		train_set.remove(fold)
		train_set = sum(train_set, [])
		test_set = list()
		for row in fold:
			row_copy = list(row)
			test_set.append(row_copy)
			row_copy[-1] = None
		predicted = algorithm(train_set, test_set, *args)
		actual = [row[-1] for row in fold]
		accuracy = accuracy_metric(actual, predicted)
		scores.append(accuracy)
	return scores

# Split the dataset by class values, returns a dictionary
def separate_by_class(dataset):
	separated = dict()
	for i in range(len(dataset)):
		vector = dataset[i]
		class_value = vector[-1]
		if (class_value not in separated):
			separated[class_value] = list()
		separated[class_value].append(vector)
	return separated

# Calculate the mean of a list of numbers
def mean(numbers):
	return sum(numbers)/float(len(numbers))

# Calculate the standard deviation of a list of numbers
def stdev(numbers):
	avg = mean(numbers)
	variance = sum([(x-avg)**2 for x in numbers]) / float(len(numbers)-1)
	return sqrt(variance)
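
# A quick hand check of the sigma (sample standard deviation) computed above,
# using a made-up list (values are illustrative, not taken from iris.csv):
#   numbers = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
#   mean = 40/8 = 5.0, sum of squared deviations = 32.0
#   sample variance = 32/(8-1) ≈ 4.571, so sigma = sqrt(4.571) ≈ 2.138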

# Calculate the mean, stdev and count for each column in a dataset
def summarize_dataset(dataset):
	summaries = [(mean(column), stdev(column), len(column)) for column in zip(*dataset)]
	del summaries[-1]
	return summaries
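
# Sketch of what summarize_dataset returns, assuming a made-up two-feature
# dataset [[1.0, 20.0, 0], [2.0, 21.0, 0], [3.0, 22.0, 1]]:
#   [(2.0, 1.0, 3), (21.0, 1.0, 3)]
# i.e. one (mean, sigma, count) tuple per feature column; the statistics for
# the class column are dropped by `del summaries[-1]` above.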

# Split the dataset by class, then calculate column statistics for each class
def summarize_by_class(dataset):
	separated = separate_by_class(dataset)
	summaries = dict()
	for class_value, rows in separated.items():
		summaries[class_value] = summarize_dataset(rows)
	return summaries

# Calculate the Gaussian probability distribution function for x
def calculate_probability(x, mean, stdev):
	exponent = exp(-((x-mean)**2 / (2 * stdev**2)))
	return (1 / (sqrt(2 * pi) * stdev)) * exponent
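
# The function above evaluates the Gaussian (normal) density
#   f(x) = 1 / (sqrt(2*pi) * sigma) * exp(-(x - mean)^2 / (2 * sigma^2))
# Sanity check: calculate_probability(1.0, 1.0, 1.0) returns
# 1/sqrt(2*pi) ≈ 0.3989. Note that sigma must be non-zero, otherwise the
# division by stdev fails.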

# Calculate the probabilities of predicting each class for a given row
def calculate_class_probabilities(summaries, row):
	total_rows = sum([summaries[label][0][2] for label in summaries])
	probabilities = dict()
	for class_value, class_summaries in summaries.items():
		probabilities[class_value] = summaries[class_value][0][2]/float(total_rows)
		for i in range(len(class_summaries)):
			mean, stdev, _ = class_summaries[i]
			probabilities[class_value] *= calculate_probability(row[i], mean, stdev)
	return probabilities
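
# Note: the returned values are unnormalized scores, not true posteriors.
# For each class the code computes P(class) * product over features of
# P(x_i | class), where P(class) is that class's row count over the total
# and each P(x_i | class) is the Gaussian density using the class's
# per-feature mean and sigma. The largest score wins in predict() below.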

# Predict the class for a given row
def predict(summaries, row):
	probabilities = calculate_class_probabilities(summaries, row)
	best_label, best_prob = None, -1
	for class_value, probability in probabilities.items():
		if best_label is None or probability > best_prob:
			best_prob = probability
			best_label = class_value
	return best_label

# Naive Bayes Algorithm
def naive_bayes(train, test):
	summarize = summarize_by_class(train)
	predictions = list()
	for row in test:
		output = predict(summarize, row)
		predictions.append(output)
	return predictions

# Test Naive Bayes on Iris Dataset
seed(1)
filename = 'iris.csv'
dataset = load_csv(filename)
for i in range(len(dataset[0])-1):
	str_column_to_float(dataset, i)
# convert class column to integers
str_column_to_int(dataset, len(dataset[0])-1)
# evaluate algorithm
n_folds = 5
scores = evaluate_algorithm(dataset, naive_bayes, n_folds)
print('Scores: %s' % scores)
print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores))))
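
# A minimal follow-up sketch (not part of the original snippet): reuse the
# functions above to fit one model on the full, already-converted dataset and
# classify a single new measurement. The four feature values below are
# illustrative, and the printed label is the integer assigned by
# str_column_to_int above.
model = summarize_by_class(dataset)
new_row = [5.7, 2.9, 4.2, 1.3]
print('Data=%s, Predicted class: %s' % (new_row, predict(model, new_row)))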