# Community

Problem with numpy ...

Share:

# Problem with numpy implementation of single hidden layer neural network (@chinmayab)
New Member
Joined: 5 months ago
Posts: 1
02/09/2019 9:34 am

I am trying to build a binary classifier (to classify pulsar stars) using a single-hidden-layer neural network. I used this dataset from Kaggle (link removed).

My NumPy implementation is not working — the cost remains unchanged. I implemented the same network with Keras (link removed) and it works fine.

The NumPy implementation follows:

`import osimport csv import numpy as npdef load_dataset(file):	with open(file, 'r') as work_file:		reader = list(csv.reader(work_file))		total = len(reader)		train_set = reader[:round(total * 0.8)]		val_set = reader[:round(total * 0.2)]		features = len(train_set[:8])		x_train = np.zeros((len(train_set), features))		y_train = np.zeros((len(train_set), 1))		x_val = np.zeros((len(val_set), features))		y_val = np.zeros((len(val_set), 1))				for index, val in enumerate(train_set):			x_train[index] = val[:features]			y_train[index] = val[-1]		for index, val in enumerate(val_set):			x_val[index] = val[:features]			y_val[index] = val[-1]	return x_train, y_train, x_val, y_valdef activation(fun, var):	val = 0.0	if fun == 'tanh':		val = np.tanh(var)		# val = np.exp(2 * var) - 1 / np.exp(2 * var) + 1		elif fun == 'sigmoid':		val = 1/ (1 + np.exp(-var))	elif fun == 'relu':		val = max(0, var)		elif fun == 'softmax':		pass	return valdef loss_calc(y, a):	return -(np.dot(y, np.log(a)) + np.dot((1-y), np.log(a)))	# return -(y * np.log(a) + (1-y) * np.log(a))x_train, y_train, x_val, y_val = load_dataset('workwith_data.csv')norm = np.linalg.norm(x_train)print(x_train)x_train = x_train/normprint(x_train)w1 = np.random.randn(x_train.shape, 3) * 0.0001w2 = np.random.randn(3, 1) * 0.01# baises over layersb1 = 0.0b2 = 0.0cost = 0.0dw1 = 0.0db1 = 0.0dw2 = 0.0db2 = 0.0samples = x_train.shapelr = 0.01for i in range(1000):	# forward pass	z1 = np.matmul(x_train, w1) + b1	a1 = activation(fun='tanh', var=z1)	z2 = np.matmul(a1, w2) + b2	a2 = activation(fun='sigmoid', var=z2)	loss = loss_calc(y_train.T, a2)	cost =  np.sum(loss)/samples	print(cost)	# Backprop	dz2 = a2 - y_train	dw2 += np.matmul(dz2.T, a1)/samples	db2 += dz2/samples	tanh_diff = 1 - np.square(z1)	dz1 = (w2.T * dz2) * tanh_diff	dw1 += np.matmul(dz1.T, x_train)/samples	db1 += dz1/samples	w1 = w1 - lr * dw1.T	w2 = w2 - lr * dw2.T	print('iteration ' + str(i) + ' cost'+str(cost))`

This topic was modified 5 months ago by chinmayab

Topic Tags (@mo-rebaie)
Estimable Member
Joined: 10 months ago
Posts: 104
10/09/2019 5:32 pm

Hello @chinmayab,

Feel free to contact me on LinkedIn so that I can understand your issue, follow up, and discuss it.