diff --git a/CSharp/CSharp.csproj b/CSharp/CSharp.csproj
index d439800..f02677b 100644
--- a/CSharp/CSharp.csproj
+++ b/CSharp/CSharp.csproj
@@ -1,10 +1,10 @@
-<Project Sdk="Microsoft.NET.Sdk">
-
-  <PropertyGroup>
-    <OutputType>Exe</OutputType>
-    <TargetFramework>net7.0</TargetFramework>
-    <ImplicitUsings>enable</ImplicitUsings>
-    <Nullable>enable</Nullable>
-  </PropertyGroup>
-
-</Project>
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net7.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+</Project>
diff --git a/FSharp/FSharp.fsproj b/FSharp/FSharp.fsproj
index 4bd5f1a..bc07e67 100644
--- a/FSharp/FSharp.fsproj
+++ b/FSharp/FSharp.fsproj
@@ -1,13 +1,13 @@
-<Project Sdk="Microsoft.NET.Sdk">
-
-  <PropertyGroup>
-    <OutputType>Exe</OutputType>
-    <TargetFramework>net7.0</TargetFramework>
-  </PropertyGroup>
-
-  <ItemGroup>
-    <Compile Include="Neural.fs" />
-    <Compile Include="Program.fs" />
-  </ItemGroup>
-
-</Project>
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net7.0</TargetFramework>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <Compile Include="Neural.fs" />
+    <Compile Include="Program.fs" />
+  </ItemGroup>
+
+</Project>
diff --git a/FSharp/Program.fs b/FSharp/Program.fs
index e2b0327..79f5eb8 100644
--- a/FSharp/Program.fs
+++ b/FSharp/Program.fs
@@ -1,74 +1,74 @@
-(*
-Licensed under the MIT License given below.
-Copyright 2023 Daniel Lidstrom
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the “Software”), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*)
-
-open Neural
-
-let randFloat =
-    let P = 2147483647u
-    let A = 16807u
-    let mutable current = 1u
-    let inner() =
-        current <- current * A % P
-        let result = float current / float P
-        result
-    inner
-let xor a b = a ^^^ b
-let orf (a: int) b = a ||| b
-let andf (a: int) b = a &&& b
-let xnor a b = 1 - xor a b
-let nand a b = 1 - andf a b
-let nor a b = 1 - orf a b
-
-let trainingData = [|
-    for i = 0 to 1 do
-        for j = 0 to 1 do
-            [| float i; float j |],
-            [| float (xor i j); float (xnor i j); float (orf i j); float (andf i j); float (nor i j); float (nand i j) |]
-|]
-
-let trainer = Trainer(2, 2, 6, randFloat)
-let lr = 1.0
-let ITERS = 4000
-for e = 0 to ITERS - 1 do
-    let input, y = trainingData[e % trainingData.Length]
-    trainer.Train(input, y, lr)
-
-let network = trainer.Network
-printfn "Result after %d iterations" ITERS
-printfn " XOR XNOR OR AND NOR NAND"
-for i, _ in trainingData do
-    let pred = network.Predict(i)
-    printfn
-        "%.0f,%.0f = %.3f %.3f %.3f %.3f %.3f %.3f"
-        i[0]
-        i[1]
-        pred[0]
-        pred[1]
-        pred[2]
-        pred[3]
-        pred[4]
-        pred[5]
-
-let networkVals = {|
-    WeightsHidden = network.WeightsHidden
-    BiasesHidden = network.BiasesHidden
-    WeightsOutput = network.WeightsOutput
-    BiasesOutput = network.BiasesOutput
-|}
-printfn $"network: %A{networkVals}"
+(*
+Licensed under the MIT License given below.
+Copyright 2023 Daniel Lidstrom
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the “Software”), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*)
+
+open Neural
+
+let randFloat =
+    let P = 2147483647u
+    let A = 16807u
+    let mutable current = 1u
+    let inner() =
+        current <- current * A % P
+        let result = float current / float P
+        result
+    inner
+let xor a b = a ^^^ b
+let orf (a: int) b = a ||| b
+let andf (a: int) b = a &&& b
+let xnor a b = 1 - xor a b
+let nand a b = 1 - andf a b
+let nor a b = 1 - orf a b
+
+let trainingData = [|
+    for i = 0 to 1 do
+        for j = 0 to 1 do
+            [| float i; float j |],
+            [| float (xor i j); float (xnor i j); float (orf i j); float (andf i j); float (nor i j); float (nand i j) |]
+|]
+
+let trainer = Trainer(2, 2, 6, randFloat)
+let lr = 1.0
+let ITERS = 4000
+for e = 0 to ITERS - 1 do
+    let input, y = trainingData[e % trainingData.Length]
+    trainer.Train(input, y, lr)
+
+let network = trainer.Network
+printfn "Result after %d iterations" ITERS
+printfn " XOR XNOR OR AND NOR NAND"
+for i, _ in trainingData do
+    let pred = network.Predict(i)
+    printfn
+        "%.0f,%.0f = %.3f %.3f %.3f %.3f %.3f %.3f"
+        i[0]
+        i[1]
+        pred[0]
+        pred[1]
+        pred[2]
+        pred[3]
+        pred[4]
+        pred[5]
+
+let networkVals = {|
+    WeightsHidden = network.WeightsHidden
+    BiasesHidden = network.BiasesHidden
+    WeightsOutput = network.WeightsOutput
+    BiasesOutput = network.BiasesOutput
+|}
+printfn $"network: %A{networkVals}"
diff --git a/Python/Xor.py b/Python/Xor.py
index aaf3063..7524e14 100644
--- a/Python/Xor.py
+++ b/Python/Xor.py
@@ -1,104 +1,104 @@
-# https://mlnotebook.github.io/post/nn-in-python/
-# https://flipdazed.github.io/blog/python%20tutorial/introduction-to-neural-networks-in-python-using-XOR
-
-import numpy as np
-
-# True means run a single epoch and print intermediate values; set to False to run all epochs
-debug = False
-
-np.set_printoptions(precision=3, suppress=True)
-
-np.random.seed(42) # this makes sure you get the same results as me
-
-def xor(x1, x2):
- return bool(x1) != bool(x2)
-
-def xnor(x1, x2):
- return 1 - xor(x1, x2)
-
-def orf(x1, x2):
- return bool(x1) or bool(x2)
-
-def andf(x1, x2):
- return bool(x1) and bool(x2)
-
-def nand(x1, x2):
- return 1 - andf(x1, x2)
-
-def nor(x1, x2):
- return 1 - orf(x1, x2)
-
-def sigmoid(x):
- return 1 / (1 + np.exp(-x))
-
-def sigmoid_derivative(sigmoid_result):
- return sigmoid_result * (1 - sigmoid_result)
-
-def error(target, prediction):
- return .5 * (target - prediction)**2
-
-def error_derivative(target, prediction):
- return prediction - target
-
-xs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
-ys = np.array([
- [xor(*i), xnor(*i), orf(*i), andf(*i), nor(*i), nand(*i)]
- for i in xs
- ],
- dtype=int)
-print(ys)
-alpha = 1
-n_neurons_input, n_neurons_hidden, n_neurons_output = 2, 2, 6
-
-w_hidden = np.random.random(size=(n_neurons_input, n_neurons_hidden))
-if debug: print("w_hidden", w_hidden)
-b_hidden = np.random.random(size=(1, n_neurons_hidden))
-if debug: print("b_hidden", b_hidden)
-
-w_output = np.random.random(size=(n_neurons_hidden, n_neurons_output))
-if debug: print("w_output", w_output)
-b_output = np.random.random(size=(1, n_neurons_output))
-if debug: print("b_output", b_output)
-
-for i in range(4000):
- ix = i % len(xs)
- x = xs[ix:ix+1]
- y = ys[ix:ix+1]
-
- # forward prop
- if debug: print("x", x)
- y_hidden = sigmoid(np.dot(x, w_hidden) + b_hidden)
- if debug: print("y_hidden = sigmoid(np.dot(x, w_hidden) + b_hidden)", y_hidden)
- if debug: print("np.dot(y_hidden, w_output)", np.dot(y_hidden, w_output))
- if debug: print("np.dot(y_hidden, w_output) + b_output", np.dot(y_hidden, w_output) + b_output)
- y_output = sigmoid(np.dot(y_hidden, w_output) + b_output)
- if debug: print("y_output = sigmoid(np.dot(y_hidden, w_output) + b_output)", y_output)
-
- # back prop
- grad_output = error_derivative(y, y_output) * sigmoid_derivative(y_output)
- if debug: print("grad_output = error_derivative(y, y_output) * sigmoid_derivative(y_output)", grad_output)
- if debug: print("grad_output", grad_output)
- if debug: print("w_output.T", w_output.T)
- if debug: print("grad_output.dot(w_output.T)", grad_output.dot(w_output.T))
- grad_hidden = grad_output.dot(w_output.T) * sigmoid_derivative(y_hidden)
- if debug: print("sigmoid_derivative(y_hidden)", sigmoid_derivative(y_hidden))
- if debug: print("grad_hidden = grad_output.dot(w_output.T) * sigmoid_derivative(y_hidden)", grad_hidden)
-
- # update parameters
- if debug: print("y_hidden.T.dot(grad_output)", y_hidden.T.dot(grad_output))
- w_output -= alpha * y_hidden.T.dot(grad_output)
- if debug: print("w_output -= alpha * y_hidden.T.dot(grad_output)", w_output)
- w_hidden -= alpha * x.T.dot(grad_hidden)
- if debug: print("w_hidden -= alpha * x.T.dot(grad_hidden)", w_hidden)
-
- if debug: print("np.sum(grad_output)", np.sum(grad_output))
- b_output -= alpha * grad_output
- if debug: print("b_output -= alpha * np.sum(grad_output)", b_output)
- if debug: print("np.sum(grad_hidden)", np.sum(grad_hidden))
- b_hidden -= alpha * grad_hidden
- if debug: print("b_hidden -= alpha * np.sum(grad_hidden)", b_hidden)
- if debug: break
-
-ys_hidden = sigmoid(np.dot(xs, w_hidden) + b_hidden)
-ys_output = sigmoid(np.dot(ys_hidden, w_output) + b_output)
-print(ys_output)
+# https://mlnotebook.github.io/post/nn-in-python/
+# https://flipdazed.github.io/blog/python%20tutorial/introduction-to-neural-networks-in-python-using-XOR
+
+import numpy as np
+
+# True means run a single epoch and print intermediate values; set to False to run all epochs
+debug = False
+
+np.set_printoptions(precision=3, suppress=True)
+
+np.random.seed(42) # this makes sure you get the same results as me
+
+def xor(x1, x2):
+ return bool(x1) != bool(x2)
+
+def xnor(x1, x2):
+ return 1 - xor(x1, x2)
+
+def orf(x1, x2):
+ return bool(x1) or bool(x2)
+
+def andf(x1, x2):
+ return bool(x1) and bool(x2)
+
+def nand(x1, x2):
+ return 1 - andf(x1, x2)
+
+def nor(x1, x2):
+ return 1 - orf(x1, x2)
+
+def sigmoid(x):
+ return 1 / (1 + np.exp(-x))
+
+def sigmoid_derivative(sigmoid_result):
+ return sigmoid_result * (1 - sigmoid_result)
+
+def error(target, prediction):
+ return .5 * (target - prediction)**2
+
+def error_derivative(target, prediction):
+ return prediction - target
+
+xs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
+ys = np.array([
+ [xor(*i), xnor(*i), orf(*i), andf(*i), nor(*i), nand(*i)]
+ for i in xs
+ ],
+ dtype=int)
+print(ys)
+alpha = 1
+n_neurons_input, n_neurons_hidden, n_neurons_output = 2, 2, 6
+
+w_hidden = np.random.random(size=(n_neurons_input, n_neurons_hidden))
+if debug: print("w_hidden", w_hidden)
+b_hidden = np.random.random(size=(1, n_neurons_hidden))
+if debug: print("b_hidden", b_hidden)
+
+w_output = np.random.random(size=(n_neurons_hidden, n_neurons_output))
+if debug: print("w_output", w_output)
+b_output = np.random.random(size=(1, n_neurons_output))
+if debug: print("b_output", b_output)
+
+for i in range(4000):
+ ix = i % len(xs)
+ x = xs[ix:ix+1]
+ y = ys[ix:ix+1]
+
+ # forward prop
+ if debug: print("x", x)
+ y_hidden = sigmoid(np.dot(x, w_hidden) + b_hidden)
+ if debug: print("y_hidden = sigmoid(np.dot(x, w_hidden) + b_hidden)", y_hidden)
+ if debug: print("np.dot(y_hidden, w_output)", np.dot(y_hidden, w_output))
+ if debug: print("np.dot(y_hidden, w_output) + b_output", np.dot(y_hidden, w_output) + b_output)
+ y_output = sigmoid(np.dot(y_hidden, w_output) + b_output)
+ if debug: print("y_output = sigmoid(np.dot(y_hidden, w_output) + b_output)", y_output)
+
+ # back prop
+ grad_output = error_derivative(y, y_output) * sigmoid_derivative(y_output)
+ if debug: print("grad_output = error_derivative(y, y_output) * sigmoid_derivative(y_output)", grad_output)
+ if debug: print("grad_output", grad_output)
+ if debug: print("w_output.T", w_output.T)
+ if debug: print("grad_output.dot(w_output.T)", grad_output.dot(w_output.T))
+ grad_hidden = grad_output.dot(w_output.T) * sigmoid_derivative(y_hidden)
+ if debug: print("sigmoid_derivative(y_hidden)", sigmoid_derivative(y_hidden))
+ if debug: print("grad_hidden = grad_output.dot(w_output.T) * sigmoid_derivative(y_hidden)", grad_hidden)
+
+ # update parameters
+ if debug: print("y_hidden.T.dot(grad_output)", y_hidden.T.dot(grad_output))
+ w_output -= alpha * y_hidden.T.dot(grad_output)
+ if debug: print("w_output -= alpha * y_hidden.T.dot(grad_output)", w_output)
+ w_hidden -= alpha * x.T.dot(grad_hidden)
+ if debug: print("w_hidden -= alpha * x.T.dot(grad_hidden)", w_hidden)
+
+ if debug: print("np.sum(grad_output)", np.sum(grad_output))
+ b_output -= alpha * grad_output
+ if debug: print("b_output -= alpha * np.sum(grad_output)", b_output)
+ if debug: print("np.sum(grad_hidden)", np.sum(grad_hidden))
+ b_hidden -= alpha * grad_hidden
+ if debug: print("b_hidden -= alpha * np.sum(grad_hidden)", b_hidden)
+ if debug: break
+
+ys_hidden = sigmoid(np.dot(xs, w_hidden) + b_hidden)
+ys_output = sigmoid(np.dot(ys_hidden, w_output) + b_output)
+print(ys_output)
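Xor.py relies on two analytic derivatives: error_derivative, which is prediction - target (from E = 0.5 * (target - prediction)^2), and sigmoid_derivative, which is s * (1 - s) expressed in terms of the sigmoid's own output. A finite-difference sanity check (a standalone sketch, not part of the repo) confirms both before trusting the hand-written backprop:

    # Central-difference check of the two analytic derivatives used above.
    import numpy as np

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    t, p, h = 1.0, 0.3, 1e-6
    print(p - t,                                   # error_derivative(t, p)
          (0.5 * (t - (p + h))**2 - 0.5 * (t - (p - h))**2) / (2 * h))

    s = sigmoid(0.7)
    print(s * (1 - s),                             # sigmoid_derivative(s)
          (sigmoid(0.7 + h) - sigmoid(0.7 - h)) / (2 * h))

Both pairs agree to several decimal places, which is the usual quick test for a backprop implementation.
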
diff --git a/Rust/src/rnd.rs b/Rust/src/rnd.rs
index b50052e..6700b78 100644
--- a/Rust/src/rnd.rs
+++ b/Rust/src/rnd.rs
@@ -1,38 +1,38 @@
-/*
- Licensed under the MIT License given below.
- Copyright 2023 Daniel Lidstrom
- Permission is hereby granted, free of charge, to any person obtaining a copy of
- this software and associated documentation files (the “Software”), to deal in
- the Software without restriction, including without limitation the rights to
- use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
- the Software, and to permit persons to whom the Software is furnished to do so,
- subject to the following conditions:
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
- THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
- FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
- COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
- IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-const P: u32 = 2147483647;
-const A: u32 = 16807;
-pub struct Rnd {
- current: u32
-}
-impl Rnd {
- pub fn new() -> Rnd {
- Rnd { current: 1 }
- }
- pub fn next(&mut self) -> u32 {
- self.current = self.current.wrapping_mul(A) % P;
- self.current
- }
- pub fn next_float(&mut self) -> f64 {
- let u = self.next();
- let result = u as f64 / P as f64;
- result
- }
-}
+/*
+ Licensed under the MIT License given below.
+ Copyright 2023 Daniel Lidstrom
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the “Software”), to deal in
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ the Software, and to permit persons to whom the Software is furnished to do so,
+ subject to the following conditions:
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+const P: u32 = 2147483647;
+const A: u32 = 16807;
+pub struct Rnd {
+ current: u32
+}
+impl Rnd {
+ pub fn new() -> Rnd {
+ Rnd { current: 1 }
+ }
+ pub fn next(&mut self) -> u32 {
+ self.current = self.current.wrapping_mul(A) % P;
+ self.current
+ }
+ pub fn next_float(&mut self) -> f64 {
+ let u = self.next();
+ let result = u as f64 / P as f64;
+ result
+ }
+}
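P = 2^31 - 1 and A = 16807 = 7^5 are the constants of Park and Miller's "minimal standard" Lehmer generator. Note that next reduces a wrapping_mul, i.e. the product modulo 2^32, before taking % P, so the stream departs from the textbook generator as soon as current * A overflows 32 bits; the F# randFloat above wraps the same way on uint32, so the two ports should stay in step with each other. A sketch of both variants (plain Python, not repo code):

    # Wrapped-u32 variant (as in rnd.rs) vs. the textbook Park-Miller LCG.
    P, A = 2147483647, 16807

    def wrapped(n, cur=1):
        out = []
        for _ in range(n):
            cur = cur * A % 2**32 % P  # mimic u32 wrapping_mul, then % P
            out.append(cur)
        return out

    def textbook(n, cur=1):
        out = []
        for _ in range(n):
            cur = cur * A % P          # full-width product mod P
            out.append(cur)
        return out

    print(wrapped(3))   # [16807, 282475249, 1622647863]
    print(textbook(3))  # [16807, 282475249, 1622650073] -- diverges at step 3

Determinism, not statistical quality, is what matters here: the ports shown seed with 1, so every language produces the same initial weights.
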
diff --git a/doc/brain.png b/doc/brain.png
index 65ce043..d598142 100644
Binary files a/doc/brain.png and b/doc/brain.png differ
diff --git a/doc/networks.png b/doc/networks.png
index 243250a..340a91d 100644
Binary files a/doc/networks.png and b/doc/networks.png differ
diff --git a/doc/nn.png b/doc/nn.png
index 533ee73..e97a01a 100644
Binary files a/doc/nn.png and b/doc/nn.png differ
diff --git a/semeion.data.bz2 b/semeion.data.bz2
index ade0cce..abe6054 100644
Binary files a/semeion.data.bz2 and b/semeion.data.bz2 differ