First neural network prototype is ready

2025-08-18 20:04:38 +07:00
parent c301c9f825
commit a3337ef60c
5 changed files with 42 additions and 47 deletions

View File

@@ -14,6 +14,9 @@ class Node:
name = f"{self.label}:" if self.label else ""
return f"<{name}{self.op or 'var'} val={self.val:.6g} grad={self.grad:.6g}>"
def __float__(self):
return self.val
@staticmethod
def _to_node(x):
return x if isinstance(x, Node) else Node(x)
@@ -63,8 +66,10 @@ class Node:
def __rtruediv__(self, other): return Node._to_node(other) / self
def __pow__(self, other):
def pow_with_node(self, other):
other = Node._to_node(other)
if self.val <= 0:
raise ValueError("base must be > 0 for real-valued power")
out = Node(self.val**other.val, parents=[(self, lambda g: g * other.val * (self.val ** (other.val-1))), (other, lambda g: g * (self.val ** other.val) * math.log(self.val))], op=f"**{other.val}")
def _backward():
self.grad += out.grad * other.val * (self.val ** (other.val-1))
@@ -72,6 +77,14 @@ class Node:
out._backward = _backward
return out
def __pow__(self, p: float):
# the exponent is fixed as a scalar p
out = Node(self.val**p, parents=[(self, lambda g: g * p * (self.val ** (p-1)))], op=f"**{p}")
def _backward():
self.grad += out.grad * p * (self.val ** (p-1))
out._backward = _backward
return out
def __rpow__(self, p: float):
# the base is fixed as a scalar p
out = Node(p**self.val, parents=[(self, lambda g: g * p ** self.val * math.log(p))], op=f"{p}**")
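A quick finite-difference check of the two rules encoded above, d(x**p)/dx = p*x**(p-1) and d(p**x)/dx = p**x * ln(p). This sketch is standalone; the helper name num_grad is invented for this note, not part of the repo:

import math

def num_grad(f, x, eps=1e-6):
    # central difference approximates f'(x) to O(eps**2)
    return (f(x + eps) - f(x - eps)) / (2 * eps)

p, x = 3.0, 2.0
assert abs(num_grad(lambda t: t**p, x) - p * x**(p - 1)) < 1e-4      # __pow__ rule
assert abs(num_grad(lambda t: p**t, x) - p**x * math.log(p)) < 1e-4  # __rpow__ rule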
@@ -113,14 +126,12 @@ def backward(loss: Node):
build(p)
topo.append(u)
build(loss)
print(topo, list(reversed(topo)), visited)
# 2) initialize dL/dL = 1 and walk the graph in reverse order
for n in topo:
n.grad = 0.0
loss.grad = 1.0
for node in reversed(topo):
node._backward()
print(node)
if __name__ == "__main__":
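For context on the backward hunk above: this is the standard reverse-mode autodiff pattern, where a post-order DFS builds a topological order, dL/dL is seeded to 1, and gradients flow from the loss back to the leaves. Below is a minimal self-contained sketch of the same idea; TinyNode, mul, and add are stand-ins invented for this note, not the repo's Node API:

class TinyNode:
    # value, accumulated gradient, and (parent, pullback) pairs
    def __init__(self, val, parents=()):
        self.val, self.grad, self.parents = val, 0.0, list(parents)

def mul(a, b):
    return TinyNode(a.val * b.val, [(a, lambda g: g * b.val), (b, lambda g: g * a.val)])

def add(a, b):
    return TinyNode(a.val + b.val, [(a, lambda g: g), (b, lambda g: g)])

def backward(loss):
    topo, visited = [], set()
    def build(u):                # post-order DFS: parents land before children
        if u not in visited:
            visited.add(u)
            for p, _ in u.parents:
                build(p)
            topo.append(u)
    build(loss)
    loss.grad = 1.0              # seed dL/dL = 1
    for node in reversed(topo):  # walk from the loss toward the leaves
        for parent, pull in node.parents:
            parent.grad += pull(node.grad)

x = TinyNode(3.0)
y = add(mul(x, x), x)            # y = x*x + x, so dy/dx = 2*x + 1 = 7
backward(y)
assert abs(x.grad - 7.0) < 1e-12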

View File

@@ -1,18 +1,18 @@
import random
class DataSet:
def __init__(self, N=1000) -> None:
def __init__(self, func, N=1000) -> None:
self.train = []
self.train_answs = []
self.test = []
self.test_answs = []
for i in range(N//5*4):
x = random.uniform(-1000, 1000)
x = random.uniform(1, 9)
self.train.append(x)
self.train_answs.append(x+1)
self.train_answs.append(func(x))
for i in range(N//5):
x = random.uniform(-1000, 1000)
x = random.uniform(1, 9)
self.test.append(x)
self.test_answs.append(x+1)
self.test_answs.append(func(x))
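The DataSet change makes the target function a parameter instead of the hard-coded x+1 and narrows sampling from [-1000, 1000] to [1, 9]; N//5*4 points go to train and N//5 to test. A short usage sketch, assuming the class lives in classes.py as the import in generate.py suggests:

import classes

ds = classes.DataSet(lambda x: 2 * x + 3, N=10)  # 8 train points, 2 test points
print(list(zip(ds.train, ds.train_answs)))       # pairs (x, 2*x + 3)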

View File

@@ -1,4 +1,7 @@
import classes
def generate_dataset(N=1000):
return classes.DataSet(N)
return classes.DataSet(lambda x: 1*x-12, N)
if __name__ == "__main__":
print([[i.train, i.train_answs] for i in [generate_dataset(10)]])

View File

@@ -4,7 +4,7 @@ import visual
import neuro_defs
dataset = generate.generate_dataset(1000)
dataset = generate.generate_dataset(100)
# Create and train the network
@@ -12,8 +12,9 @@ nn = neuro_defs.SimpleNN()
nn.train(dataset.train, dataset.train_answs, epochs=100)
# Check on new points
for dot in dataset.test[:10]:
print(nn.forward(dot), dot)
for dot in range(len(dataset.test)):
print(nn.forward(dataset.test[dot]).val, dataset.test_answs[dot])
print()
print(nn.w_out.val, nn.b_out.val)
# visual.plot_dataset(dataset)
# visual.plt_show()
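The rewritten loop prints each prediction next to its expected answer rather than next to the raw input, which makes over- or under-shooting visible at a glance. A hedged addition that summarizes the same comparison as a single number, using only the attributes shown above:

errors = [abs(nn.forward(x).val - y)
          for x, y in zip(dataset.test, dataset.test_answs)]
print("test MAE:", sum(errors) / len(errors))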

View File

@@ -1,48 +1,28 @@
import math
import random
from auto_diff import auto_diff
def sigmoid(x):
if x >= 0:
z = math.exp(-x)
return 1 / (1 + z)
else:
z = math.exp(x)
return z / (1 + z)
def sigmoid_derivative(x):
s = sigmoid(x)
return s * (1 - s)
def func_active(x):
return
class SimpleNN:
def __init__(self):
# initialize weights with random numbers
self.w1 = random.uniform(-1, 1)
self.b = random.uniform(-1, 1) # bias
self.w_out = random.uniform(-1, 1)
self.b_out = random.uniform(-1, 1)
self.lr = 0.001 # learning rate
self.w_out = auto_diff.Node(random.uniform(-1, 1), label="w_out")
self.b_out = auto_diff.Node(random.uniform(-1, 1), label="b_out")
self.lr = 0.02 # learning rate
def forward(self, x1):
def forward(self, x):
# forward pass
self.z1 = self.w1 * x1 + self.b
self.a1 = sigmoid(self.z1) # hidden-layer activation
self.z2 = self.w_out * self.a1 + self.b_out
self.a2 = sigmoid(self.z2) # network output
return self.a2
self.z1 = self.w_out * x + self.b_out
return self.z1
def backward(self, x1, y):
def backward(self, x, y):
# compute the error
error = self.a2 - y # dL/da2
error = (self.z1 - y)**2 # squared-error loss; backward() starts from this Node
# derivatives for the output layer
d_out = error * sigmoid_derivative(self.z2)
self.w_out -= self.lr * d_out * self.a1
self.b_out -= self.lr * d_out
# derivatives for the hidden layer
d_hidden = d_out * self.w_out * sigmoid_derivative(self.z1)
self.w1 -= self.lr * d_hidden * x1
self.b -= self.lr * d_hidden
auto_diff.backward(error)
self.w_out = auto_diff.Node(float(self.w_out) - self.lr * self.w_out.grad, label="w_out")
self.b_out = auto_diff.Node(float(self.b_out) - self.lr * self.b_out.grad, label="b_out")
def train(self, dataset, answs, epochs=1000):
for _ in range(epochs):
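Since forward is now the linear map z1 = w_out * x + b_out and the loss is (z1 - y)**2, the gradients auto_diff.backward must produce are easy to derive by hand: dL/dw_out = 2 * (z1 - y) * x and dL/db_out = 2 * (z1 - y). A standalone sketch of one SGD step using those closed forms (sgd_step is invented for this note; the learning rate is kept below the diff's 0.02 so this toy loop stays stable for x up to 9):

def sgd_step(w, b, x, y, lr=0.005):
    # one descent step on L = (w*x + b - y)**2
    z = w * x + b
    dw = 2 * (z - y) * x  # matches what backward() accumulates into w_out.grad
    db = 2 * (z - y)      # matches b_out.grad
    return w - lr * dw, b - lr * db

w, b = 0.0, 0.0
for _ in range(1000):
    for x, y in [(1.0, -11.0), (5.0, -7.0), (9.0, -3.0)]:  # samples of y = x - 12
        w, b = sgd_step(w, b, x, y)
print(w, b)  # converges toward w = 1, b = -12, the line used in generate.py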