SwitchNet neural network examples

Neural networks using fast transforms as a core component.

You can view the fast transform as a synthetic weight matrix that is extremely cheap to compute, combined with parametric activation functions as the adjustable part.
Or you can view the fast transform as a one-to-all connection algorithm that links multiple small-width layers into one complete wide layer, gaining both a speed advantage and a reduction in parameter count.
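
A minimal sketch of the first view (not part of the example below; the helper names compareFastAndDense and fastTransform are only for this demo): random sign flips followed by the normalized Walsh-Hadamard transform act like multiplication by one fixed dense matrix, except the cost is O(n log n) rather than O(n*n). It relies on the whtN() function defined at the bottom of the example.

// Demo only: build the dense matrix implied by (sign flips, then whtN) from
// unit basis vectors and check it matches the fast path.
void compareFastAndDense() {
  int n = 8; // must be a power of two
  float[] flips = new float[n];
  for (int i = 0; i < n; i++) {
    flips[i] = random(1) < 0.5 ? -1f : 1f; // fixed random sign flips
  }
  float[][] dense = new float[n][n];
  for (int c = 0; c < n; c++) {
    float[] e = new float[n];
    e[c] = 1f;
    fastTransform(e, flips);
    for (int r = 0; r < n; r++) {
      dense[r][c] = e[r]; // column c of the synthetic weight matrix
    }
  }
  float[] x = new float[n];
  for (int i = 0; i < n; i++) {
    x[i] = randomGaussian();
  }
  float[] fast = x.clone();
  fastTransform(fast, flips); // O(n log n) path
  for (int r = 0; r < n; r++) {
    float slow = 0f;
    for (int c = 0; c < n; c++) {
      slow += dense[r][c] * x[c]; // O(n*n) path
    }
    println(fast[r] + " ~ " + slow); // should agree up to floating point rounding
  }
}

void fastTransform(float[] v, float[] flips) {
  for (int i = 0; i < v.length; i++) {
    v[i] *= flips[i];
  }
  whtN(v);
}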

// Test with Lissajous curves
color c1;
float rate;
int epochs;
float[][] ex;
float[] work = new float[256];
SWNet net;

void setup() {
  size(400, 400);
  rate = 0.1f;   // Learning rate
  epochs = 20;   // Epochs per frame
  net = new SWNet(256, 4, 4, rate); // vecLen, widthBy, depth, learning rate
  c1 = color(0xff, 0xd7, 0x00); // gold
  ex=new float[8][256];
  for (int i = 0; i < 128; i++) {
    // Training data: 128 (x, y) Lissajous points per 256-long example vector
    float t = (i * 2 * PI) / 128;
    ex[0][2 * i] = sin(t);
    ex[0][2 * i + 1] = sin(2 * t);
    ex[1][2 * i] = sin(2 * t);
    ex[1][2 * i + 1] = sin(t);
    ex[2][2 * i] = sin(2 * t);
    ex[2][2 * i + 1] = sin(3 * t);
    ex[3][2 * i] = sin(3 * t);
    ex[3][2 * i + 1] = sin(2 * t);
    ex[4][2 * i] = sin(3 * t);
    ex[4][2 * i + 1] = sin(4 * t);
    ex[5][2 * i] = sin(4 * t);
    ex[5][2 * i + 1] = sin(3 * t);
    ex[6][2 * i] = sin(2 * t);
    ex[6][2 * i + 1] = sin(5 * t);
    ex[7][2 * i] = sin(5 * t);
    ex[7][2 * i + 1] = sin(2 * t);
  }
  textSize(16);
}

void draw() {
  background(0);
  //loadPixels();
  for (int k=0; k<epochs; k++) {
    for (int i = 0; i < ex.length; i++) {
      net.train(ex[i], ex[i]); // autoassociative: the target is the input itself
    }
  }
  for (int i = 0; i < 8; i++) {
    for (int j = 0; j < 255; j += 2) {
      set(25 + i * 40 + int(18 * ex[i][j]), 44 + int(18 * ex[i][j + 1]), c1);
    }
  }
  for (int i = 0; i < 8; i++) {
    net.recall(work, ex[i]);
    for (int j = 0; j < 255; j += 2) {
      set(25 + i * 40 + int(18 * work[j]), 104 + int(18 * work[j + 1]), c1);
    }
  }
  //updatePixels();
  text("Training Data", 5, 20);
  text("Autoassociative recall", 5, 80);
  text("Iterations: " + frameCount*epochs, 5, 150);
}

final class SWNet {
  final int vecLen;
  final int widthBy;
  final int depth;
  final float[] params;
  final float[] surface;
  final float[] work;
  final float[] flipsIn;
  final float[] flipsOut;
  float rate;
  SWNet(int vecLen, int widthBy, int depth, float rate) {
    this.vecLen = vecLen;
    this.widthBy=widthBy; // Increase the width to allow full information flow through each layer.
    this.depth = depth;
    this.rate=rate;
    params = new float[2*vecLen*widthBy*depth];
    surface=new float[vecLen*widthBy*depth];
    work=new float[vecLen*widthBy];
    flipsIn=new float[vecLen*widthBy];
    flipsOut=new float[vecLen];
    for (int i = 0; i < params.length; i++) {
      params[i] = randomGaussian();
    }
    int r = 0x9E3779B9;
    for (int i=0; i<vecLen*widthBy; i++) {
      r=r*1664525+1013904223; // Numerical Recipes LCG random number generator
      flipsIn[i] = (r & 1<<16) == 0 ? -1f : 1f; // use bit 16; the higher bits of an LCG are more random
    }
    for (int i=0; i<vecLen; i++) {
      r=r*1664525+1013904223;
      flipsOut[i] = (r & 1<<16) == 0 ? -1f : 1f;
    }
  }

  void recall(float[] result, float[] input) {
    final int n=vecLen*widthBy;
    for (int i=0; i<n; i++) {
      work[i]=flipsIn[i]*input[i&(vecLen-1)]; // vecLen is a power of two, so vecLen-1 acts as a binary mask (cheap modulo)
    }
    whtN(work); // Quasi-random projection of the input; the spectral response aspect of the transform is not needed
    int paIdx = 0;
    for (int i = 0; i < depth; i++) {
      for (int j = 0; j < n; j++) {
        float x = work[j]; // two-slope activation: one trainable slope for negative inputs, one for positive
        if (x < 0f) {
          work[j]=x*params[paIdx];
        } else {
          work[j]=x*params[paIdx+1];
        }
        paIdx += 2;
      }
      whtN(work);
    }
    for(int i=0;i<vecLen;i++){
      result[i]=work[i]*flipsOut[i];
    }  
  }

  void train(float[] target, float[] input) {
    final int n=vecLen*widthBy; // Forward pass
    for (int i=0; i<n; i++) {
      work[i]=flipsIn[i]*input[i&(vecLen-1)];
    }
    whtN(work);
    int paIdx = 0;
    for (int i = 0; i < depth; i++) {
      System.arraycopy(work, 0, surface, i*n, n); // store each layer's activation inputs for the backward pass
      for (int j = 0; j < n; j ++) {
        float x= work[j];
        if (x < 0) {
          work[j]=x*params[paIdx];
        } else {
          work[j]=x*params[paIdx+1];
        }
        paIdx +=2;
      }
      whtN(work);
    }
    for (int i=0; i<vecLen; i++) {
      work[i]=work[i]*flipsOut[i]; // apply the output sign flips to get the network output
    }
    for (int i=0; i<vecLen; i++) {
      work[i]=(target[i]-work[i])*rate; // error scaled by the learning rate
    }
    for (int i=0; i<vecLen; i++) {
      work[i]=work[i]*flipsOut[i]; // undo the output sign flips on the error
    }
    for (int i=vecLen; i<n; i++) {
      work[i]=0f; // clear the unused upper part of the wide vector
    }
    for (int i = depth-1; i >=0; i--) {
      whtN(work);  // the normalized WHT is self-inverse, so this maps the error back through the transform
      paIdx=i*n*2;
      int surIdx=i*n;
      for (int j = 0; j < n; j ++,paIdx+=2) {
        float x=surface[surIdx+j];
        float e=work[j]; // error
        if (x < 0f) {
          params[paIdx]+=x*e;       // gradient step on the negative-side slope
          work[j]*=params[paIdx];   // pass the error back through the activation slope
        } else {
          params[paIdx+1]+=x*e;     // gradient step on the positive-side slope
          work[j]*=params[paIdx+1];
        }
      }
    }
  }
}
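
The adjustable part of each layer is a two-slope activation: every element carries one trainable slope for negative inputs and one for positive inputs, and train() takes a plain gradient step on whichever slope was active (d(x*slope)/d(slope) = x, and the error reaching that point has already been scaled by the learning rate). A per-element sketch of that rule, with illustrative names only:

// Demo only: per-element view of the activation and its update in SWNet.
// slopes[0] applies to negative inputs, slopes[1] to positive inputs.
float twoSlopeForward(float x, float[] slopes) {
  return x < 0f ? x * slopes[0] : x * slopes[1];
}

// Matches params[paIdx] += x*e in train(): the active slope moves by the
// activation input times the back-propagated, rate-scaled error.
void twoSlopeUpdate(float x, float scaledError, float[] slopes) {
  if (x < 0f) {
    slopes[0] += x * scaledError;
  } else {
    slopes[1] += x * scaledError;
  }
}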


// Fast Walsh-Hadamard Transform (normalized for scale)
void whtN(float[] vec) {
  final int n = vec.length;
  int hs = 1;
  while (hs < n) {
    int i = 0;
    while (i < n) {
      final int j = i + hs;  // final here is a good hint to HotSpot
      while (i < j) {
        float a = vec[i];
        float b = vec[i + hs];
        vec[i] = a + b;
        vec[i + hs] = a - b;
        i += 1;
      }
      i += hs;
    }
    hs += hs;
  }
  float sc=1f/sqrt(n);  // scale to leave overall vector magnitude unchanged
  for (int i=0; i<n; i++) {
    vec[i]*=sc;
  }
}
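
The normalized transform is orthogonal and its own inverse (applying whtN() twice returns the original vector), which is the property train() relies on to carry the output error back through the layers. A minimal check, with an illustrative helper name:

// Demo only: verify that whtN() is self-inverse.
void checkWhtSelfInverse() {
  float[] v = new float[16]; // length must be a power of two
  for (int i = 0; i < v.length; i++) {
    v[i] = randomGaussian();
  }
  float[] copy = v.clone();
  whtN(v);
  whtN(v); // transforming twice should give back the original vector
  for (int i = 0; i < v.length; i++) {
    println(copy[i] + " ~ " + v[i]); // equal up to floating point rounding
  }
}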

Further experiments:
https://archive.org/details/switch-nets