[diary #7] Deep Learning LogicGate

kamchur · October 16, 2022

😁START

A deep learning network has a simple layered structure:
input layer → hidden layer → output layer

Each unit's activation is computed by multiplying the input data by the weights, adding a bias, and passing the result through an activation function.
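
For instance, a single unit's activation can be sketched like this (a minimal example with made-up numbers, not from the original post):

import numpy as np

x = np.array([1.0, 0.0])    # two input values (hypothetical)
w = np.array([0.5, -0.3])   # weights for this unit (hypothetical)
b = 0.1                     # bias

z = np.dot(x, w) + b        # weighted sum plus bias: z = x·w + b
a = 1 / (1 + np.exp(-z))    # sigmoid activation
print(z, a)                 # ≈ 0.6  0.6457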

import numpy as np

# sigmoid function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
# numerical derivative (central difference approximation)
def numerical_derivative(function, x):
    delta_x = 1e-4
    
    grad = np.zeros_like(x)
    
    # iterate over every element of x, whatever its shape
    iterator = np.nditer(x, flags=['multi_index'])
    
    while not iterator.finished:
        idx = iterator.multi_index
        
        val = x[idx]
        x[idx] = float(val) + delta_x
        f1 = function(x)   # f(x + delta_x)
        
        x[idx] = float(val) - delta_x
        f2 = function(x)   # f(x - delta_x)
        
        # central difference: (f(x+h) - f(x-h)) / 2h
        grad[idx] = (f1 - f2) / (2 * delta_x)
        x[idx] = val   # restore the original value
        
        iterator.iternext()
    
    return grad
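
A quick sanity check of numerical_derivative (my own example, not in the original post): for f(x) = Σ xᵢ², the gradient is 2x, so at [3.0, 2.0] the result should be approximately [6.0, 4.0].

def f(x):
    return np.sum(x**2)   # hypothetical test function: sum of squares, gradient is 2x

print(numerical_derivative(f, np.array([3.0, 2.0])))   # ≈ [6. 4.]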

LogicGate class code

class LogicGate:
    def __init__(self, gate_name, xdata, tdata):
        self.name = gate_name
        
        # input layer unit
        self.__xdata = xdata.reshape(4, 2)   # 4 input samples
        self.__tdata = tdata.reshape(4, 1)
        
        # hidden layer unit
        self.__weight2 = np.random.rand(2, 6)   # hidden layer with 6 nodes
        self.__bias2 = np.random.rand(6)
        
        # output layer unit
        self.__weight3 = np.random.rand(6, 1)
        self.__bias3 = np.random.rand(1)
        
        # learning rate init
        self.__learning_rate = 1e-2
        
        print(self.name + ' object is created')
    
    # compute the loss function via feed-forward
    def feed_forward(self):
        delta = 1e-7   # prevents taking log(0), which diverges to -infinity
        
        z2 = np.dot(self.__xdata, self.__weight2) + self.__bias2
        a2 = sigmoid(z2)
        
        z3 = np.dot(a2, self.__weight3) + self.__bias3
        y_hat = a3 = sigmoid(z3)
        
        # cross-entropy
        return -np.sum( self.__tdata * np.log(y_hat + delta) + (1 - self.__tdata) * np.log((1 - y_hat) + delta) )
    
    # compute the loss function for external display (same computation as feed_forward)
    def loss_val(self):
        delta = 1e-7
        
        z2 = np.dot(self.__xdata, self.__weight2) + self.__bias2
        a2 = sigmoid(z2)
        
        z3 = np.dot(a2, self.__weight3) + self.__bias3
        y_hat = a3 = sigmoid(z3)
        
        # cross-entropy
        return -np.sum( self.__tdata * np.log(y_hat + delta) + (1 - self.__tdata) * np.log((1 - y_hat) + delta) )
    
    def train(self):
        # numerical_derivative perturbs the weights in place, so the lambda
        # can ignore its argument and simply re-evaluate feed_forward
        f = lambda x: self.feed_forward()
        print('initial loss value = ', self.loss_val())
        
        for step in range(10001):
            self.__weight2 -= self.__learning_rate * numerical_derivative(f, self.__weight2)
            self.__bias2 -= self.__learning_rate * numerical_derivative(f, self.__bias2)
            self.__weight3 -= self.__learning_rate * numerical_derivative(f, self.__weight3)
            self.__bias3 -= self.__learning_rate * numerical_derivative(f, self.__bias3)
            
            if (step % 400 == 0):
                print('step = ', step, ' , loss value = ', self.loss_val())
                
    def predict(self, xdata):
        z2 = np.dot(xdata, self.__weight2) + self.__bias2
        a2 = sigmoid(z2)
        
        z3 = np.dot(a2, self.__weight3) + self.__bias3
        y = a3 = sigmoid(z3)
        
        if y > 0.5:   # classify with a 0.5 decision threshold
            result = 1
        else:
            result = 0
            
        return y, result
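
The loss returned by feed_forward and loss_val is the binary cross-entropy, summed over the four training samples; writing the targets as t and the network output as y_hat:

E = -Σ [ t·log(y_hat + δ) + (1 - t)·log(1 - y_hat + δ) ]

where δ = 1e-7 only guards against evaluating log(0).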

training data

# XOR
x_train = np.array([[0,0], [0,1], [1, 0], [1, 1]])
y_train = np.array([[0], [1], [1], [0]])

model = LogicGate('XOR', x_train, y_train)
model.train()

predict: test data

x_test = np.array([[0,0], [0,1], [1, 0], [1, 1]])

for data in x_test:
    print(model.predict(data))
[result]
(array([0.09933951]), 0)
(array([0.89373189]), 1)
(array([0.76636852]), 1)
(array([0.24363436]), 0)
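
To compare the predictions against the XOR targets (a small add-on of mine, not part of the original post):

for data, target in zip(x_test, y_train):
    _, predicted = model.predict(data)
    print(data, '-> predicted:', predicted, ', target:', target[0])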

😂END

2022.10.16. first commit

※ Reference: NeoWizard YouTube Deep Learning course
