I am new to Python and trying to learn machine learning with it. I have tried to write a neural network from scratch, with one hidden layer, on the famous Iris dataset. It is a three-class classifier whose output is one-hot vectors. I have also taken help from already written implementations; for instance, I used the same training set as my testing set.
It is a lot of code to go through, so here is my biggest problem stated directly: how do I subtract the output 'y' (a one-hot matrix of shape (150, 3)) from my softmax output, which has shape (150, 21)? I tried to look online, and everyone uses this method, but since I am weak in Python I don't understand it. This is the line of code:

delta3[range(m1), y] -= 1
When m1 is the size, 150 (an integer), that line gives:

IndexError: arrays used as indices must be of integer (or boolean) type

and if I instead give it the shape (150, 3), then

delta3[range(m1), y] -= 1

raises:

TypeError: range() integer end argument expected, got tuple.

To recap: m1 = 150, my y matrix has shape (150, 3), and softmax has shape (150, 21).
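For reference, here is a minimal, made-up sketch (the toy numbers, 4 samples and 3 classes, are my own choice, not from my actual code) of how I believe that fancy-indexing line is meant to be used, namely with a 1-D array of integer class labels rather than a one-hot matrix:

import numpy as np

softmax = np.array([[0.7, 0.2, 0.1],   # made-up scores: 4 samples, 3 classes
                    [0.1, 0.8, 0.1],
                    [0.3, 0.3, 0.4],
                    [0.2, 0.5, 0.3]])
labels = np.array([0, 1, 2, 1])        # integer class labels, NOT one-hot

delta3 = softmax.copy()
delta3[np.arange(4), labels] -= 1      # subtracts 1 at (row i, column labels[i]) for every row

So the pair (range(m1), y) is apparently supposed to pick exactly one entry per row, row i and column y[i], which only makes sense when y[i] is a single integer, not a whole one-hot row.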
My code is:
# labels or classes:
#   1 = Iris-setosa
#   2 = Iris-versicolor
#   0 = Iris-virginica
# features:
#   sepal length (cm)
#   sepal width (cm)
#   petal length (cm)
#   petal width (cm)
import pandas as pd
import matplotlib.pyplot as plt
import csv
import numpy as np
df=pd.read_csv('Iris.csv')
df = df.apply(pd.to_numeric, errors='ignore')  # convert_objects() is deprecated
df.fillna(0,inplace=True)
df.drop(['Id'], axis=1, inplace=True)
# function to convert the three text labels into the values 0, 1, 2
def handle_non_numericaldata(df):
    columns = df.columns.values
    for column in columns:
        text_digit_vals = {}
        def convert_to_int(val):
            return text_digit_vals[val]
        if df[column].dtype != np.int64 and df[column].dtype != np.float64:
            column_contents = df[column].values.tolist()
            unique_elements = set(column_contents)
            x = 0
            for unique in unique_elements:
                if unique not in text_digit_vals:
                    text_digit_vals[unique] = x
                    x += 1
            df[column] = list(map(convert_to_int, df[column]))
    return df

handle_non_numericaldata(df)
x = np.array(df.drop(['Species'], axis=1).astype(float))
c = np.array(df['Species'])      # integer class labels, shape (150,)
n_values = np.max(c) + 1
y = np.eye(n_values)[c]          # one-hot labels, shape (150, 3)
# no of examples "m"
m1 = np.size(c)
theta = np.ones(shape=(4, 1))    # layer-1 weights: 4 features -> 1 hidden unit
theta2 = np.ones(shape=(1, 21))  # layer-2 weights: 1 hidden unit -> 21 outputs
# learning rate alpha
alpha = 0.01
# regularization parameter
lamda = 0.01
for i in range(1, 1000):
    # forward pass
    z1 = np.dot(x, theta)
    sigma = 1/(1+np.exp(-z1))
    # activation of layer 2
    a2 = sigma
    z2 = np.dot(a2, theta2)
    probs = np.exp(z2)
    softmax = probs/np.sum(probs, axis=1, keepdims=True)
    # backpropagation
    delta3 = softmax
    delta3[range(m1), y] -= 1    # <-- the line that fails
    A2 = np.transpose(a2)
    dw2 = A2.dot(delta3)
    W2 = np.transpose(theta2)
    delta2 = delta3.dot(W2)*sigma*(1-sigma)
    X2 = np.transpose(x)
    dw1 = np.dot(X2, delta2)
    # regularization and gradient-descent update
    dw2 = dw2 - lamda*theta2
    dw1 = dw1 - lamda*theta
    theta = theta - alpha*dw1
    theta2 = theta2 - alpha*dw2
    # cross-entropy loss with L2 regularization
    correct_logprobs = -np.log(probs[range(m1), y])    # same indexing problem as above
    data_loss = np.sum(correct_logprobs)
    data_loss += lamda/2*(np.sum(np.square(theta)) + np.sum(np.square(theta2)))
    loss = 1./m1*data_loss
    if i % 100 == 0:
        print("loss after iteration %i: %f" % (i, loss))
# final forward pass on the (training) data
final1 = x.dot(theta)
sigma = 1/(1+np.exp(-final1))
z2 = sigma.dot(theta2)
exp_scores = np.exp(z2)
probs = exp_scores/np.sum(exp_scores, axis=1, keepdims=True)
print(np.argmax(probs, axis=1))
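And here is a minimal sketch of what I think the shapes would have to look like for the subtraction to work at all (the hidden size of 8 is an arbitrary assumption of mine; the output size has to be 3 so that softmax matches my (150, 3) one-hot y):

hidden = 8                               # hypothetical hidden-layer size (my assumption)
theta = np.ones(shape=(4, hidden))       # 4 features -> hidden units
theta2 = np.ones(shape=(hidden, 3))      # hidden units -> 3 classes

a2 = 1/(1+np.exp(-x.dot(theta)))         # (150, hidden)
probs = np.exp(a2.dot(theta2))
softmax = probs/np.sum(probs, axis=1, keepdims=True)   # (150, 3), same shape as y

delta3 = softmax - y                     # direct subtraction of the one-hot matrix
# equivalently, using the integer labels c instead of the one-hot y:
# delta3 = softmax.copy()
# delta3[np.arange(m1), c] -= 1

If I understand correctly, delta3[range(m1), y] -= 1 assumes y holds integer labels (like my c array), while with one-hot labels the equivalent operation is simply softmax - y. Is that right, and is that the cause of both errors above?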