From f59a6392faaa2d4fe22af8523c63c979b428d620 Mon Sep 17 00:00:00 2001 From: Muttaqin Date: Sun, 7 Jul 2019 13:26:48 +0700 Subject: [PATCH] first commit --- README.md | 1 + ai_ga.py | 274 ++++++++++++++++++++++++++++++++++++++++ ai_nn.py | 280 +++++++++++++++++++++++++++++++++++++++++ file_model.py | 120 ++++++++++++++++++ main.py | 337 ++++++++++++++++++++++++++++++++++++++++++++++++++ step_info.py | 76 ++++++++++++ 6 files changed, 1088 insertions(+) create mode 100644 README.md create mode 100755 ai_ga.py create mode 100755 ai_nn.py create mode 100755 file_model.py create mode 100755 main.py create mode 100755 step_info.py diff --git a/README.md b/README.md new file mode 100644 index 0000000..710363b --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# pid-tuning-ai diff --git a/ai_ga.py b/ai_ga.py new file mode 100755 index 0000000..3cd5539 --- /dev/null +++ b/ai_ga.py @@ -0,0 +1,274 @@ +import numpy as np +import random +import time +from scipy.ndimage.filters import gaussian_filter1d +from step_info import StepInfo + +class DNA: + ''' + This is class for DNA to store properties value or do a action for population object + ''' + def __init__(self, kp, ki, kd): + # Store properties of DNA's individu + self.kp = kp + self.ki = ki + self.kd = kd + + self.fitness = 0 + self.normalize_prob = 0 + + self.risetime = 0 + self.overshoot = 0 + self.settling_time = 0 + self.peak = 0 + self.steadystate = 0 + + self.saved = 0 + self.creator = "random" + + self.x_step = np.array([]) + self.y_step = np.array([]) + + self.serial = None + + def calculate_fitness(self, max_step, sp, serial): + # calculate fitness from step control of PID + self.serial = serial + + #change this with communication protocol + self.serial.write("k "+ str(self.kp) + " "+ str(self.ki) + " "+ str(self.kd) + " "+ str(sp) + " "+ str(max_step)) + + print "\t\tKP:",self.kp," KI:",self.ki," KD:",self.kd + print "\t\tSetpoint:",sp + + x_list = [] + y_list = [] + + step = 0 + data = 0 + + while len(y_list) < max_step: + data_serial = self.serial.readline() + data_serial = data_serial.replace('\r','') + data_serial = data_serial.replace('\n','') + try: + data = float(data_serial) + except ValueError: + data = data + + step += 1 + + x_list.append(step) + y_list.append(data) + print "\t\t\tStep:",step," Height:",data + + #smoothing graph plot of y value + ysmoothed = gaussian_filter1d(y_list, sigma=2) + + #finding step info from each iteration + info = StepInfo(x_list, ysmoothed, sp) + + self.risetime = info.getRiseTime() + self.overshoot = info.getOvershoot() + self.peak = info.getPeak() + self.settling_time = info.getSettlingTime() + self.steadystate = info.getSteadyStateError() + + self.y_step = ysmoothed + self.x_step = x_list + + if self.settling_time == 0: + hitung_settling = len(y_list) + else: + hitung_settling = self.settling_time + + #fitness function calculating from step info + #self.fitness = 100/(self.risetime+(self.overshoot*self.overshoot)+self.peak+hitung_settling+self.steadystate) + self.fitness = 1.0/info.getMSE() + + print "\t\tRiseTime:",self.risetime," Overshoot:",self.overshoot," Peak:",self.peak," SettlingTime:",self.settling_time," Steadystate Error:", self.steadystate + print "\t\tFitness:",self.fitness + + time.sleep(5) + + +class Population: + ''' + This class contains individual and evolution function. 
+ Main class for genetic algorithm + ''' + population = [] + max = False + + def __init__(self, mutation_rate = 0.3, crossover_rate = 0.7, + max_population = 100, max_timestep = 10, max_gain_value = 1, min_gain = 1,max_gain = 10, + max_generate_initial_population = 100, setpoint = 30, serial = None): + + self.serial = serial + + self.properties = { + "MutationRate" : mutation_rate, + "CrossoverRate" : crossover_rate, + "MaxPopulation" : max_population, + "MaxTimestep" : max_timestep, + "MinGain" : min_gain, + "MaxGain" : max_gain, + "MaxGenerateInitial" : max_generate_initial_population, + "MaxGainValue" : max_gain_value, + "SetPoint" : setpoint + } + + if mutation_rate != None: + self.mutation_rate = mutation_rate + self.crossover_rate = crossover_rate + self.max_population = max_population + self.max_timestep = max_timestep + self.min_gain = min_gain + self.max_gain = max_gain + self.max_generate_initial_population = max_generate_initial_population + self.max_gain_value = max_gain_value + self.setpoint = setpoint + + self.properties = { + "MutationRate" : self.mutation_rate, + "CrossoverRate" : self.crossover_rate, + "MaxPopulation" : self.max_population, + "MaxTimestep" : self.max_timestep, + "MinGain" : self.min_gain, + "MaxGain" : self.max_gain, + "MaxGenerateInitial" : self.max_generate_initial_population, + "MaxGainValue" : self.max_gain_value, + "SetPoint" : self.setpoint + } + + def setProperties(self, prop): + self.mutation_rate = prop["MutationRate"] + self.crossover_rate = prop["CrossoverRate"] + self.max_population = prop["MaxPopulation"] + self.max_timestep = prop["MaxTimestep"] + self.min_gain = prop["MinGain"] + self.max_gain = prop["MaxGain"] + self.max_generate_initial_population = prop["MaxGenerateInitial"] + self.max_gain_value = prop["MaxGainValue"] + self.setpoint = prop["SetPoint"] + + def setPopulation(self, popu): + for i in range(len(popu)): + self.population.append(i) + + self.population[i] = DNA( + float(popu[i][0]), + float(popu[i][1]), + float(popu[i][2]) + ) + + self.population[i].fitness = float(popu[i][3]) + self.population[i].risetime = float(popu[i][4]) + self.population[i].overshoot = float(popu[i][5]) + self.population[i].settling_time = float(popu[i][6]) + self.population[i].peak = float(popu[i][7]) + self.population[i].steadystate = float(popu[i][8]) + self.population[i].creator = popu[i][9] + self.population[i].saved = popu[i][10] + + def generate_initial_population(self): + #function for generate random individu in first iteration of evolution process + random.seed() + for i in range(self.max_generate_initial_population): + self.population.append(i) + self.population[i] = DNA(random.uniform(self.min_gain, self.max_gain), + random.uniform(self.min_gain, self.max_gain*0.6), + random.uniform(self.min_gain, self.max_gain*0.6)) + + def add_to_population(self, individu): + size_pop = len(self.population) + self.population.append(size_pop+1) + self.population[size_pop] = individu + + def pick_parent(self): + #pick parent based on normalize probability + random.seed() + index = 0 + r = random.random() + while(r > 0): + r = r - self.population[index].normalize_prob + index = index + 1 + index = index - 1 + return index + + def pick_best(self): + i_best = np.amax(self.population, axis = 0)[3] + for i in range(len(self.population)): + if self.population[i][3] == i_best: + break + return self.population[i] + + def selection(self): + sum_prob = 0 + parents = [] + + s_population = len(self.population) + + print "\tCalculating Fitness..." 
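+        # Individuals with fitness == 0 have not been tested on the plant yet, so each
+        # one gets a step test over the serial link via calculate_fitness(). The fitness
+        # values are then normalised by the population total so that pick_parent() can
+        # perform roulette-wheel (fitness-proportionate) selection of the two parents.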
+ for i in range(s_population): + if self.population[i].fitness == 0: + print "\t\tPopulation Index:",i + self.population[i].calculate_fitness(self.max_timestep, self.setpoint, self.serial) + + for i in range(s_population): + sum_prob = sum_prob + self.population[i].fitness + for i in range(s_population): + self.population[i].normalize_prob = self.population[i].fitness/float(sum_prob) + + if s_population >= self.max_population: + self.max = True + + parentA = self.pick_parent() + parentB = self.pick_parent() + while parentA == parentB: + parentB = self.pick_parent() + parentA = self.population[parentA] + parentB = self.population[parentB] + parents = [parentA,parentB] + + return parents + + def mutation(self, parent): + flag = False + random.seed() + if random.random() < self.mutation_rate: + #adding old value with a very small random number + parent.kp = parent.kp + random.random() / self.max_gain_value + parent.ki = parent.ki + random.random() / self.max_gain_value + parent.kd = parent.kd + random.random() / self.max_gain_value + + parent.fitness = 0 + + #parent.creator = "mutation" + + def crossover(self,parentA, parentB): + flag = False + random.seed() + if random.random() < self.crossover_rate: + child = [] + for i in range(6): + child.append(i) + child[0] = DNA(parentA.kp,parentA.ki,parentB.kd) + child[1] = DNA(parentA.kp,parentB.ki,parentB.kd) + child[2] = DNA(parentA.kp,parentB.ki,parentA.kd) + child[3] = DNA(parentB.kp,parentB.ki,parentA.kd) + child[4] = DNA(parentB.kp,parentA.ki,parentA.kd) + child[5] = DNA(parentB.kp,parentA.ki,parentB.kd) + for i in range(6): + for x in self.population: + #filter from duplicate DNA data + if x.kp == child[i].kp and x.ki == child[i].ki and x.kd == child[i].kd: + flag = True + + #filter from data which kp < ki & kd + if child[i].kp < child[i].ki and child[i].kp < child[i].kd: + flag = True + + if flag == False: + self.add_to_population(child[i]) + #child[i].creator = "crossover" diff --git a/ai_nn.py b/ai_nn.py new file mode 100755 index 0000000..8c773f6 --- /dev/null +++ b/ai_nn.py @@ -0,0 +1,280 @@ +import numpy as np + +class ActivationFunction: + def __init__(self, func,dfunc): + self.func = func + self.dfunc = dfunc + +sigmoid = ActivationFunction( + lambda x:1 / (1 + np.exp(-x)), + lambda y:y * (1 - y) +) +linear = ActivationFunction( + lambda x:x, + lambda y:1 +) + + +tanh = ActivationFunction( + lambda x:np.tanh(x), + lambda y:1 - (y * y) +) + +relu = ActivationFunction( + lambda x:x * (x > 0), + lambda y:1. 
* (y > 0) +) + + +class NeuralNetwork: + def __init__(self,i_nodes, h_nodes, o_nodes, open = False): + self.risetime_max = 0 + self.risetime_min = 0 + + self.overshoot_max = 0 + self.overshoot_min = 0 + + self.settling_max = 0 + self.settling_min = 0 + + self.peak_max = 0 + self.peak_min = 0 + + self.steady_max = 0 + self.steady_min = 0 + + if isinstance(i_nodes, NeuralNetwork): + temp = i_nodes + + self.input_nodes = temp.input_nodes + self.hidden_nodes = temp.hidden_nodes + self.output_nodes = temp.output_nodes + + self.weights_ih = temp.weights_ih.copy() + self.weights_ho = temp.weights_ho.copy() + + self.bias_h = temp.bias_h.copy() + self.bias_o = temp.bias_o.copy() + else: + self.input_nodes = i_nodes + self.hidden_nodes = h_nodes + self.output_nodes = o_nodes + + if open == False: + random_func = lambda x:x*2-1 + + self.weights_ih = np.random.rand(self.input_nodes, self.hidden_nodes) + func_ih = np.vectorize(random_func) + self.weights_ih = np.matrix(func_ih(self.weights_ih)) + + self.weights_ho = np.random.rand(self.hidden_nodes, self.output_nodes) + + func_ho = np.vectorize(random_func) + self.weights_ho = np.matrix(func_ho(self.weights_ho)) + + + self.bias_h = np.random.rand(1, self.hidden_nodes) + func_bias_h = np.vectorize(random_func) + self.bias_h = np.matrix(func_bias_h(self.bias_h)) + + self.bias_o = np.random.rand(1, self.output_nodes) + func_bias_o = np.vectorize(random_func) + self.bias_o = np.matrix(func_bias_o(self.bias_o)) + + + self.setActivation("sigmoid") + self.setLearningRate(0.1) + + + def setActivation(self, func): + if func == "sigmoid": + self.activation_function = sigmoid + elif func == "tanh": + self.activation_function = tanh + elif func == "relu": + self.activation_function = relu + elif func == "linear": + self.activation_function = linear + self.activation_function_o = linear + + def save(self): + np.save("weights_ih", self.weights_ih) + np.save("weights_ho", self.weights_ho) + np.save("bias_h", self.bias_h) + np.save("bias_o", self.bias_o) + + np.save("rt", ([self.risetime_min,self.risetime_max])) + np.save("os", ([self.overshoot_min,self.overshoot_max])) + np.save("st", ([self.settling_min,self.settling_max])) + np.save("pk", ([self.peak_min,self.peak_max])) + np.save("se", ([self.steady_min,self.steady_max])) + + def normalize(self, x): + temp = [] + x1 = [] + x2 = [] + x3 = [] + x4 = [] + x5 = [] + for i in range(len(x)-1): + flag = False + if i == 0: + temp.append(x[0]) + else: + for j in range(len(temp)): + if(x[i][0] == temp[j][0] and x[i][1] == temp[j][1] and x[i][2] == temp[j][2]): + flag = True + if flag == False: + temp.append(x[i]) + x1.append(float(x[i][4])) + x2.append(float(x[i][5])) + x3.append(float(x[i][6])) + x4.append(float(x[i][7])) + x5.append(float(x[i][8])) + + + self.risetime_max = np.amax(x1,axis=0) + self.risetime_min = np.amin(x1,axis=0) + + self.overshoot_max = np.amax(x2,axis=0) + self.overshoot_min = np.amin(x2,axis=0) + + self.settling_max = np.amax(x3,axis=0) + self.settling_min = np.amin(x3,axis=0) + + self.peak_max = np.amax(x4,axis=0) + self.peak_min = np.amin(x4,axis=0) + + self.steady_max = np.amax(x5,axis=0) + self.steady_min = np.amin(x5,axis=0) + + return temp + + def setLearningRate(self, r): + self.learning_rate = r + + def funcMap(self, input, func): + temp = input.copy() + val = 0 + for i in range (input.shape[0]): + for j in range (input.shape[1]): + val = input[i,j] + temp[i,j] = func(val) + return temp + + def predict_from_model(self, input): + self.weights_ih = np.load("weights_ih.npy") + self.weights_ho = 
np.load("weights_ho.npy") + + self.bias_h = np.load("bias_h.npy") + self.bias_o = np.load("bias_o.npy") + + rt = np.load("rt.npy") + os = np.load("os.npy") + st = np.load("st.npy") + pk = np.load("pk.npy") + se = np.load("se.npy") + + temp = input + temp[0] = np.interp(input[0],rt,[0,1]) + temp[1] = np.interp(input[1],os,[0,1]) + temp[2] = np.interp(input[2],st,[0,1]) + temp[3] = np.interp(input[3],pk,[0,1]) + temp[4] = np.interp(input[4],se,[0,1]) + + input = np.matrix(temp) + + hidden = np.dot(input, self.weights_ih) + hidden = np.add(hidden, self.bias_h) + + hidden = self.funcMap(hidden, self.activation_function.func) + + output = np.dot(hidden, self.weights_ho) + output = np.add(output, self.bias_o) + + + output = self.funcMap(output, self.activation_function_o.func) + + return output + + def predict(self, input): + temp = input + temp[0] = np.interp(input[0],[self.risetime_min,self.risetime_max],[0,1]) + temp[1] = np.interp(input[1],[self.overshoot_min,self.overshoot_max],[0,1]) + temp[2] = np.interp(input[2],[self.settling_min,self.settling_max],[0,1]) + temp[3] = np.interp(input[3],[self.peak_min,self.peak_max],[0,1]) + temp[4] = np.interp(input[4],[self.steady_min,self.steady_max],[0,1]) + + input = np.matrix(temp) + + hidden = np.dot(input, self.weights_ih) + hidden = np.add(hidden, self.bias_h) + + hidden = self.funcMap(hidden, self.activation_function.func) + + output = np.dot(hidden, self.weights_ho) + output = np.add(output, self.bias_o) + + + output = self.funcMap(output, self.activation_function_o.func) + + return output + + def train(self, input, target): + temp = input + temp[0] = np.interp(input[0],[self.risetime_min,self.risetime_max],[0,1]) + temp[1] = np.interp(input[1],[self.overshoot_min,self.overshoot_max],[0,1]) + temp[2] = np.interp(input[2],[self.settling_min,self.settling_max],[0,1]) + temp[3] = np.interp(input[3],[self.peak_min,self.peak_max],[0,1]) + temp[4] = np.interp(input[4],[self.steady_min,self.steady_max],[0,1]) + + input = np.matrix(temp) + + #Generate hidden input calculation, and add with bias + hidden = np.dot(input, self.weights_ih) + hidden = np.add(hidden, self.bias_h) + + #Activate hidden node + hidden = self.funcMap(hidden, self.activation_function.func) + + #Generate Output of output node, and add with bias + output = np.dot(hidden, self.weights_ho) + output = np.add(output, self.bias_o) + + #Activate output node + output = self.funcMap(output, self.activation_function_o.func) + + #Calculate error of system prediction + output_error = np.subtract(target, output) + #print output_error[0, 0] + + #Calculate the gradien of output + gradient = self.funcMap(output, self.activation_function_o.dfunc) + gradient = np.multiply(gradient, output_error) + gradient = np.multiply(gradient, self.learning_rate) + + #Calculate deltas + hidden_t = np.transpose(hidden) + weight_ho_delta = np.dot(hidden_t,gradient) + + #Adjust weight by the deltas + self.weights_ho = np.add(self.weights_ho, weight_ho_delta) + + #Adjust bias by the gradien + self.bias_o = np.add(self.bias_o, gradient) + + #Calculate hidden layer error + who_t = np.transpose(self.weights_ho) + hidden_error = np.dot(output_error, who_t) + + #Calculate hidden gradien + hidden_gradien = self.funcMap(hidden, self.activation_function.dfunc) + hidden_gradien = np.multiply(hidden_gradien, hidden_error) + hidden_gradien = np.multiply(hidden_gradien, self.learning_rate) + + #calculate input node to hidden node deltas + input_T = np.transpose(input) + weight_ih_delta = np.dot(input_T, hidden_gradien) + + 
self.weights_ih = np.add(self.weights_ih, weight_ih_delta) + self.bias_h = np.add(self.bias_h, hidden_gradien) \ No newline at end of file diff --git a/file_model.py b/file_model.py new file mode 100755 index 0000000..d2271e1 --- /dev/null +++ b/file_model.py @@ -0,0 +1,120 @@ +import os +import json +import csv +import numpy as np +from matplotlib import pyplot as plt + +class FileModel: + ''' + This class for opening or saving model of population or properties from genetic AI + ''' + def __init__(self, model_name): + self.dict = os.getcwd() + + self.model_name = model_name + self.name_population = self.model_name + '_population.csv' + self.name_properties = self.model_name + '_properties.json' + + os.system("mkdir -p genetic_data/"+self.model_name) + os.system("mkdir -p genetic_data/"+self.model_name+"/graph") + os.system("mkdir -p genetic_data/"+self.model_name+"/step") + + def save_population_to_model(self, population): + temp_population = [] + + for i in range(len(population)): + temp_population.append(i) + temp_population[i] = [ + population[i].kp, + population[i].ki, + population[i].kd, + population[i].fitness, + population[i].risetime, + population[i].overshoot, + population[i].settling_time, + population[i].peak, + population[i].steadystate, + population[i].creator, + population[i].saved + ] + + with open(self.dict + "/genetic_data/" + self.model_name + "/" + self.name_population, 'w') as population_file: + population_writer= csv.writer(population_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) + for i in range(len(population)): + population_writer.writerow(temp_population[i]) + + def save_properties_to_model(self, properties): + with open(self.dict + "/genetic_data/" + self.model_name + "/" + self.name_properties, 'w') as fp: + json.dump(properties, fp) + + def save_individu_to_model(self, individu): + temp_population = [ + individu.kp, + individu.ki, + individu.kd, + individu.fitness, + individu.risetime, + individu.overshoot, + individu.settling_time, + individu.peak, + individu.steadystate, + individu.creator, + individu.saved + ] + os.system("mkdir -p genetic_data/"+self.model_name) + with open(self.dict + "/genetic_data/" + self.model_name + "/" + self.name_population, 'a') as population_file: + population_writer= csv.writer(population_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) + population_writer.writerow(temp_population) + + def save_individu_to_graph(self, x_list, y_list, setpoint, name): + sp_list = np.full(len(x_list), setpoint) + plt.figure(name) + plt.plot(x_list, y_list, label="control data") + plt.plot(x_list, sp_list, label="setpoint") + plt.savefig(self.dict + "/genetic_data/" + self.model_name + "/graph/" + str(name) + ".png") + plt.close(name) + + temp_step= [] + + for i in range(len(x_list)): + temp_step.append(i) + temp_step[i] = [ + x_list[i], + y_list[i] + ] + + with open(self.dict + "/genetic_data/" + self.model_name + "/step/" + str(name) + ".csv", 'w') as step_file: + step_writer = csv.writer(step_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) + for i in range(len(x_list)): + step_writer.writerow(temp_step[i]) + + + def open_population_from_model(self): + population_temp = [] + fol = self.dict + "/genetic_data/" + self.model_name + "/" + self.name_population + with open(fol) as csv_file: + csv_reader = csv.reader(csv_file, delimiter=',') + line_count = 0 + for row in csv_reader: + population_temp.append(1) + population_temp[line_count] = [ + row[0], + row[1], + row[2], + row[3], + row[4], + row[5], + 
row[6], + row[7], + row[8], + row[9], + row[10] + ] + line_count += 1 + return population_temp + + def open_properties_from_model(self): + fol = self.dict + "/genetic_data/" + self.model_name + "/" + self.name_properties + with open(fol) as json_file: + pooldata = json.load(json_file) + return pooldata \ No newline at end of file diff --git a/main.py b/main.py new file mode 100755 index 0000000..bb0eb98 --- /dev/null +++ b/main.py @@ -0,0 +1,337 @@ +import numpy as np +import time +import serial +import sys +import random +import csv +from ai_ga import DNA, Population +from ai_nn import NeuralNetwork +from file_model import FileModel + + +def ga(auto = False): + if auto == False: + i_m = raw_input("Open Genetic properties ?(y/n): ") + else: + i_m = "y" + + if i_m == "y": + if auto == True: + MUTATION_RATE = 0.3 + CROSSOVER_RATE = 0.7 + MAX_POPULATION = 1000 + MAX_TIMESTEP = 10000 + MAX_GAIN_VALUE = 2.5 + MIN_GAIN = 0 + MAX_GAIN = 10 + MAX_INIT_POPULATION = 5 + SETPOINT = 26.50 + pop = Population( + MUTATION_RATE, + CROSSOVER_RATE, + MAX_POPULATION, + MAX_TIMESTEP, + MAX_GAIN_VALUE, + MIN_GAIN, + MAX_GAIN, + MAX_INIT_POPULATION, + SETPOINT, + SERIAL + ) + file_model.save_properties_to_model(pop.properties) + print "Properties created" + else: + properties = file_model.open_properties_from_model() + pop = Population( + None, + None, + None, + None, + None, + None, + None, + None, + None, + SERIAL + ) + pop.setProperties(properties) + print "Properties Opened" + else: + print "Please input genetic properties" + MUTATION_RATE = float(raw_input("Mutation rate (0~1): ")) + CROSSOVER_RATE = float(raw_input("Crossover rate (0~1): ")) + MAX_POPULATION = input("Maximal Population: ") + MAX_TIMESTEP = input("Maximal Step: ") + MAX_GAIN_VALUE = input("Maximal gain value: ") + MIN_GAIN = input("Minimal gain random: ") + MAX_GAIN = input("Maximal gain random: ") + MAX_INIT_POPULATION = input("Maximal initial population: ") + SETPOINT = input("Setpoint: ") + + pop = Population( + MUTATION_RATE, + CROSSOVER_RATE, + MAX_POPULATION, + MAX_TIMESTEP, + MAX_GAIN_VALUE, + MIN_GAIN, + MAX_GAIN, + MAX_INIT_POPULATION, + SETPOINT, + SERIAL + ) + print "Properties created" + + if auto == False: + i_m2 = raw_input("Open population model?(y/n): ") + else: + i_m2 = 'n' + + if i_m2 == 'y': + popu = file_model.open_population_from_model() + pop.setPopulation(popu) + print "Population opened" + else: + print "AI > Genetics > Generating Initial Population..." + pop.generate_initial_population() + print "AI > Genetics > Generating random population done!" + + print "AI > Genetcis > Starting..." + while True: + print "AI > Genetics > Selection..." + parent = pop.selection() + + print "AI > Genetics > Crossover..." + pop.crossover(parent[0],parent[1]) + + print "AI > Genetics > Mutation..." + pop.mutation(parent[0]) + + for i in range (len(pop.population)): + if pop.population[i].saved == 0 and pop.population[i].fitness != 0: + file_model.save_individu_to_model(pop.population[i]) + file_model.save_individu_to_graph(pop.population[i].x_step, pop.population[i].y_step, pop.setpoint, i) + pop.population[i].saved = 1 + + print "AI > Genetics > Population size: ",len(pop.population) + + if pop.max == True: + print "AI > Genetics > Max Population reached!" + print "AI > Genetics > Saving properties and population model..." + file_model.save_population_to_model(pop.population) + file_model.save_properties_to_model(pop.properties) + print "AI > Genetics > Done!" 
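+            # Evolution stops once the population has grown to MaxPopulation; the
+            # population CSV written here is what nn() later loads as its training data.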
+ break + +def nn(): + nn = NeuralNetwork(5, 12, 3) + data_ga = file_model.open_population_from_model() + data_ga = nn.normalize(data_ga) + len_ga = len(data_ga) + dataset = [{}] + + print "AI > Neural > Creating dataset from Genetic population..." + + for i in range(len_ga-1): + dataset.append({ + "output" : [float(data_ga[i][0]),float(data_ga[i][1]),float(data_ga[i][2])], + "input" : [float(data_ga[i][4]),float(data_ga[i][5]),float(data_ga[i][6]),float(data_ga[i][7]),float(data_ga[i][8])] + }) + + random.seed() + print "AI > Neural > Preparing for training with default iteration (1000)..." + for i in range(100000): + print "AI > Neural > Training iteration",i+1 + index = int(random.uniform(0,len_ga)) + if index > len_ga-1: + index = len_ga-1 + if index == 0: + index = 1 + nn.train(dataset[index]["input"], np.matrix(dataset[index]["output"])) + + print "AI > Neural > Training Done!" + + print "AI > Neural > Predicting the best tuning for PID..." + risetime = 25 + overshoot = 5 + settling = 50 + peak = 26 + steady = 0 + + output = nn.predict([risetime,overshoot,settling,peak,steady]) + print "" + print "\tKP:",output[0,0]," KI:",output[0,1]," KD:",output[0,2] + print "" + print "AI > Neural > Done!" + + d = raw_input("AI > Neural > Save Weight and Bias? (y/n):") + if d == 'y': + nn.save() + +def nn_predict(): + c = 'y' + + nn = NeuralNetwork(5,12,3) + + while c == 'y': + c = raw_input("AI > Neural > Try prediction? (y/n):") + if c == 'n': + break + + risetime = input("rise time:") + overshoot = input("overshoot:") + settling = input("settling time:") + peak = input("peak:") + steady = input("steady-state error:") + output = nn.predict_from_model([risetime,overshoot,settling,peak,steady]) + + print "" + print "\tKP:",output[0,0]," KI:",output[0,1]," KD:",output[0,2] + print "" + print "AI > Neural > Done!" 
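+# Both nn() and the Keras variant below consume the GA population CSV, whose columns are
+# [kp, ki, kd, fitness, risetime, overshoot, settling_time, peak, steadystate, creator,
+# saved]; columns 4-8 (the step metrics) are the network inputs and columns 0-2 (the
+# gains) are the targets.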
+ +def keras(): + from keras.models import model_from_json + import numpy as np + + json_file = open('model.json', 'r') + loaded_model_json = json_file.read() + json_file.close() + loaded_model = model_from_json(loaded_model_json) + # load weights into new model + loaded_model.load_weights("model.h5") + print("Loaded model from disk") + + # evaluate loaded model on test data + loaded_model.compile(loss='mse', optimizer='rmsprop') + + risetime = 25 + overshoot = 1 + settling = 50 + peak = 26 + steady = 0.0001 + + predict = np.array([[risetime],[overshoot],[settling],[peak],[steady]]) + predict = np.transpose(predict) + + print(loaded_model.predict(predict)) + ''' + from keras.models import Sequential, model_from_json + from keras.layers.core import Dense, Dropout, Activation + from keras.optimizers import SGD + + data_ga = [[]] + fol = "tes.csv" + with open(fol) as csv_file: + csv_reader = csv.reader(csv_file, delimiter=',') + line_count = 0 + for row in csv_reader: + data_ga.append(1) + data_ga[line_count] = [ + row[0], + row[1], + row[2], + row[3], + row[4], + row[5], + row[6], + row[7], + row[8], + row[9], + row[10] + ] + line_count += 1 + + len_ga = len(data_ga) + + input = [] + output = [] + + for i in range(len_ga-1): + input.append((float(data_ga[i][4]),float(data_ga[i][5]),float(data_ga[i][6]),float(data_ga[i][7]),float(data_ga[i][8]))) + output.append(([float(data_ga[i][0]),float(data_ga[i][1]),float(data_ga[i][2])])) + + + x = np.array(input) + y = np.array(output) + model = Sequential() + + model.add(Dense(45,activation='relu')) + model.add(Dense(30,activation='relu')) + model.add(Dense(12,activation='relu')) + model.add(Dense(3)) + + sgd = SGD(lr=0.1) + model.compile(loss='mse', optimizer='rmsprop') + + model.fit(x, y, batch_size=50, epochs=1000) + + model_json = model.to_json() + with open("model.json", "w") as json_file: + json_file.write(model_json) + # serialize weights to HDF5 + model.save_weights("model.h5") + + risetime = 29 + overshoot = 3.34052031556013 + settling = 30 + peak = 25.8346100019 + steady = 0.000503265310076 + #1.83398628969672,1.58367958575964,0.309875038875906 + + predict = np.array([[risetime],[overshoot],[settling],[peak],[steady]]) + predict = np.transpose(predict) + + print(model.predict_proba(predict)) + ''' + + + + +SERIAL = serial.Serial("/dev/ttyACM0", 115200) + +print "Opening Serial..." +#time.sleep(2) +print "Done!" + +while True: + i_c = raw_input("Calibrating ESC ?(y/n): ") + if i_c == 'y': + SERIAL.write('c') + time.sleep(5) + print "Calibrating done (sending 1000ms signal to ESC)!" + elif i_c == 'n': + break + else: + print "Input must 'y' (yes) or 'n' (no)!" 
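+# Serial protocol used by this script: 'c' asks the controller to calibrate the ESC, and
+# "k <kp> <ki> <kd> <setpoint> <steps>" (sent by DNA.calculate_fitness) starts a step
+# test during which the controller streams one height reading back per line.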
+ +print "\t\t\t\t-----------------------------------------------" +print "\t\t\t\t PID Tuning with Artificial Intelegence method" +print "\t\t\t\t Tower Copter Control" +print "\t\t\t\t M.Imam Muttaqin" +print "\t\t\t\t-----------------------------------------------" +print "\t\t\t\t Select Mode > 1.Automatic (use default-conf)" +print "\t\t\t\t > 2.Genetic Algorithm (Hard Tune)" +print "\t\t\t\t > 3.Neural Network (Soft Tune)" +print "\t\t\t\t > 4.TensorFlow (Soft Tune)" +print "\t\t\t\t > 5.NN Predict" +print "\t\t\t\t > 99.Exit" + +i_mode = input("\t\t\t\t : ") + +name_model = raw_input("Enter name of models: ") +file_model = FileModel(name_model) + +if i_mode == 1: + ga(True) + nn() +elif i_mode == 2: + ga() +elif i_mode == 3: + nn() +elif i_mode == 4: + keras() +elif i_mode == 5: + nn_predict() +else: + sys.exit() diff --git a/step_info.py b/step_info.py new file mode 100755 index 0000000..7ed53c7 --- /dev/null +++ b/step_info.py @@ -0,0 +1,76 @@ +import numpy as np + +class StepInfo: + ''' + This class is for calculate and find step info from control data system + step data require for calculate fitness function in genetic algorithm + ''' + def __init__(self, x_list, y_list, setpoint): + self.x_list = x_list + self.y_list = y_list + self.setpoint = setpoint + self.rise_time_value = 0.9 * self.setpoint + self.rise_time_index = 0 + self.error_band = 0.05 * self.setpoint + self.low_band = self.setpoint - self.error_band + self.high_band = self.setpoint + self.error_band + + def getRiseTime(self): + #rise time is time for system from the beginning until system get 90% of setpoint + self.rise_time_index = 0 + while self.rise_time_index= self.rise_time_value: + break + self.rise_time_index += 1 + return self.x_list[self.rise_time_index-1] + + def getPeak(self): + #peak value is maximum y data of the system + return np.amax(self.y_list) + + def getPeakTime(self): + #peak time is time for system (or index) when system get 'peak' level + l = np.where(self.y_list == np.amax(self.y_list)) + index = l[0][0] + return self.x_list[index] + + def getOvershoot(self): + #overshoot is percentage of 'peak' level according the last final data of y + y_len = len(self.y_list) - 1 + last_y = self.y_list[y_len] + return (self.getPeak() - self.setpoint)/(self.setpoint*1.0)*100 + + ''' + y_len = len(self.y_list) - 1 + last_y = self.y_list[y_len] + return (self.getPeak() - last_y)/(last_y*1.0)*100 + ''' + + def getMSE(self): + square_error = [] + for step in self.y_list: + square_error.append(self.setpoint-step) + + square_error = np.square(square_error) + + return np.mean(square_error) + + + def getSettlingTime(self): + #settling time is range of time when system get atleast 5% of setpoint + flag = [] + for i in range (len(self.y_list)): + if self.y_list[i] >= self.low_band and self.y_list[i] <= self.high_band: + flag.append(self.x_list[i]) + else: + flag = [] + + if not flag: + return 0 + else: + return flag[0] - self.x_list[0] + + def getSteadyStateError(self): + #steady state error is value when system can't reach setpoint in infinite iteration + p = len(self.y_list) -1 + return abs(self.y_list[p] - self.setpoint) \ No newline at end of file