
fixedwtregion.h


#ifndef __FIXEDWTREGION_H__
#define __FIXEDWTREGION_H__

#include <assert.h>
#include <math.h>    // for sqrt() in backproject()
#include <algorithm>
#include <vector>
#include <functional>
//#include <unistd.h> // Temporary; for sync()

#include "genericalgs.h"
#include "robj.h"
#include "neuralregion.h"
#include "matrix.h"
#include "ipc.h" // 020614 TEMPORARY!


/******************************************************************************/
/* General-purpose utility functions                                          */
/******************************************************************************/

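/* Convolve the kernel with the input matrix in, writing the result to out.
   Each output cell (r,c) is the dot product of the kernel with the window of
   the input anchored at (r*size_scale, c*size_scale); if overwrite is false,
   the result is accumulated into out instead of replacing it. */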
template <class MatrixK, class MatrixI, class MatrixO>
void convolve(const MatrixK& kernel, const MatrixI& in, MatrixO& out,
              const typename MatrixO::value_type size_scale=1.0,
              const bool overwrite=true)
{
  typedef typename MatrixK::size_type  SubscriptK;
  typedef typename MatrixI::size_type  SubscriptI;
  typedef typename MatrixO::size_type  SubscriptO;

  for (SubscriptO r=0; r < out.nrows(); r++)
    for (SubscriptO c=0; c < out.ncols(); c++){
      /* The min is just for safety when rounding certain non-integer size_scales */
      const SubscriptI  rp = std::min(SubscriptI(r*size_scale),SubscriptI(in.nrows()-kernel.nrows()));
      const SubscriptI  cp = std::min(SubscriptI(c*size_scale),SubscriptI(in.ncols()-kernel.ncols()));
      typename MatrixO::value_type sum=0;

      for (SubscriptK k=0; k < kernel.nrows(); k++)
        for (SubscriptK l=0; l < kernel.ncols(); l++)
          sum += kernel[k][l]*in[rp+k][cp+l];
      if (overwrite)
        out[r][c]  = sum;
      else
        out[r][c] += sum;
    }
}


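/* A variant of convolve() specialized for dense matrices: the inner product
   over the kernel is computed with raw pointer traversal of the kernel and
   input rows so that the time-critical loop stays as tight as possible. */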
template <class MatrixK, class MatrixI, class MatrixO>
void dense_convolve(const MatrixK& kernel, const MatrixI& in, MatrixO& out,
                    const typename MatrixO::value_type size_scale=1.0,
                    const bool overwrite=true)
{
  typedef typename MatrixK::size_type  SubscriptK;
  typedef typename MatrixI::size_type  SubscriptI;
  typedef typename MatrixO::size_type  SubscriptO;
  typedef typename MatrixK::value_type ValueK;
  typedef typename MatrixI::value_type ValueI;
  typedef typename MatrixO::value_type ValueO;

  const SubscriptK knr   = kernel.nrows();
  const SubscriptK knc   = kernel.ncols();
  const SubscriptI rpmin = SubscriptI(in.nrows()-knr);
  const SubscriptI cpmin = SubscriptI(in.ncols()-knc);

  for (SubscriptO r=0; r < out.nrows(); r++) {
    ValueO* oc=&(out[r][0]);
    for (SubscriptO c=0; c < out.ncols(); c++,oc++){
      /* The min is just for safety when rounding certain non-integer size_scales */
      const SubscriptI  rp = std::min(SubscriptI(r*size_scale),rpmin);
      const SubscriptI  cp = std::min(SubscriptI(c*size_scale),cpmin);
      ValueO sum=0;

      for (SubscriptK k=0; k < knr; k++) {
        const ValueK* kl=&(kernel[k][0]);
        const ValueK* kbound=kl+knc;
        const ValueI* il=&(in[rp+k][cp]);

        /* This is a time-critical loop; make sure the compiler unrolls it */
        for (; kl<kbound; kl++,il++)
          sum += (*kl)*(*il);
      }
      if (overwrite)
        (*oc)  = sum;
      else
        (*oc) += sum;
    }
  }
}
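
/* Usage sketch (illustrative only; assumes a matrix type such as the
   project's WeightMatrix/ActivityMatrix providing value_type, size_type,
   nrows(), ncols(), operator[], and a (rows,cols) constructor):

     WeightMatrix   kernel(3,3);                  // odd-sized weight kernel
     ActivityMatrix in(12,12), out(10,10);        // 10 = 12 - 3 + 1
     dense_convolve(kernel, in, out);             // out[r][c] = sum over k,l of kernel[k][l]*in[r+k][c+l]
     dense_convolve(kernel, in, out, 1.0, false); // second pass accumulated into out
*/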


/******************************************************************************/
/* NeuralRegion classes                                                       */
/******************************************************************************/


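/* A NeuralRegion whose connection weights are fixed rather than learned.
   Each named input supplies a constant weight kernel from an upstream
   region; activate() convolves every input's activity with its kernel,
   averages the results over the inputs, and applies the activation
   function (or, when none is given, simply thresholds at zero).  Because
   the weights never change, is_plastic() returns false. */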
class FixedWtRegion : public InternalNeuralRegion {
public:
  FixedWtRegion(string name_i, Dimensions dims, ActivationFunction* actfn_=0)
    : InternalNeuralRegion(name_i, dims), actfn(actfn_),
    xo(dims.xoffset), yo(dims.yoffset) {  }

  virtual ~FixedWtRegion() {  Generic::delete_contents(inputs);  }

  virtual void add_input(const string& name, NeuralRegion& upstream_region,
                         WeightFunction& fn, Length size_scale=1.0) {
    WeightMatrix kernel = (fn)(size_scale);
    /* Kernel height and width must be odd */
    assert(kernel.nrows()%2==1);
    assert(kernel.ncols()%2==1);

    Generic::insert_named(inputs,name,new Input(name,kernel,&upstream_region,size_scale));
  }

  virtual Dimensions input_dimensions(WeightFunction& fn, Length size_scale=1.0) {
    WeightMatrix kernel = (fn)(size_scale);
    return Dimensions(Subscript(size_scale*output.nrows()+kernel.nrows()-1),
                      Subscript(size_scale*output.ncols()+kernel.ncols()-1),
                      Length(xo-Subscript(kernel.nrows()/2)),
                      Length(yo-Subscript(kernel.ncols()/2)));
  }

  virtual void activate(bool=false,bool=false,bool activate=false) {
    /* Activate each input */
    int num=0;
    for (inputs_type::const_iterator i=inputs.begin(); i!=inputs.end(); i++, num++) {
#ifdef SPARSE_MATRICES
      convolve
#else
      dense_convolve
#endif
        ((*i)->kernel,(*i)->input->const_activity(),output,(*i)->size_scale,i==inputs.begin());
    }

    /* Normalize the result to be independent of the number of inputs */
    if (num>1) output *= 1.0/num;
    if (actfn && activate)
      (*actfn)(output,output);
    else {
      /* Even if no activation function, still need to threshold at zero */
      typedef SequenceTransform::Threshold<ActivityMatrix,ActivityMatrix> TActfn;
      const double zero=0.0;
      TActfn makepositive(&zero);
      (makepositive)(output,output);
    }
  }

  virtual void backproject() {

    // clean upstream bp and residual
    for (inputs_type::iterator i=inputs.begin(); i != inputs.end(); ++i) {
      if (String::non_numeric_basename_matches<string>((*i)->name(),"Afferent")){
        NeuralRegion* inp = dynamic_cast<NeuralRegion*>(get_input((*i)->name()));
        int nr = inp->bp.nrows(), nc = inp->bp.ncols();
        for (int x=0; x<nr; ++x)
          for (int y=0; y<nc; ++y) {
            inp->bp[x][y] = 0.0;
            inp->residual[x][y] = 0.0;
          }
      }
    }

    // backproject current bp to upstream bp matrix
    for (inputs_type::iterator i=inputs.begin(); i != inputs.end(); ++i){
      if (String::non_numeric_basename_matches<string>((*i)->name(),"Afferent")){
        InternalNeuralRegion::backproject((*i)->name());
      }
    }

    // calculate error and residual
    for (inputs_type::iterator i=inputs.begin(); i != inputs.end(); ++i){
      if (String::non_numeric_basename_matches<string>((*i)->name(),"Afferent")){

        NeuralRegion* inp=dynamic_cast<NeuralRegion*>(get_input((*i)->name()));

        double err = 0.0, err_tmp=0.0, bp_tmp, output_tmp;
        int nr = inp->bp.nrows(), nc = inp->bp.ncols();

        for (int x=0; x<nr; ++x)
          for (int y=0; y<nc; ++y) {
            bp_tmp        = inp->bp[x][y];
            output_tmp    = inp->output[x][y];
            err_tmp       = output_tmp - bp_tmp;
            err          += err_tmp * err_tmp;
            inp->residual[x][y] += err_tmp;
          }

        // Temporary
        ipc_log(IPC_ONE,"sqrt(sum err sqr) = %f\n", sqrt(err));
        //sync();
      }
    }
  }

  virtual NeuralRegion* get_input(const string& name) {
    Input* i = Generic::find_named<Input>(inputs,name);
    return (i? i->input: 0);
  }

  virtual const WeightMatrix get_weights(const string& name, int=0,int=0) const {
    const Input* i = Generic::find_named_const<Input>(inputs,name);
    //if (!i) ... /* Error -- weights not found */
    return (i? i->kernel : WeightMatrix());
  }

  WeightBounds* get_weights_bounds(const string& name, int ui=0, int uj=0) const
  {
    const Input* i = Generic::find_named_const<Input>(inputs,name);
    //if (!i) ... /* Error -- weights not found */
    if (!i) return new NeuralRegion::Bounds(); /* Dummy infinite bounds */

    WeightMatrix wts = i->kernel;
    assert (wts.nrows()==wts.ncols()); /* Assumption */

    const int radius=wts.nrows()/2;
    const Boundary::AARBoundingBox<int,int> r(input_row(*i,ui),input_col(*i,uj),radius);
    const NeuralRegion::Bounds b=i->input->bounds();
    return new Boundary::Intersection<int,int,double,
                                      Boundary::AARBoundingBox<int,int>,
                                      NeuralRegion::Bounds>(r,b);
  }

  virtual bool is_plastic() const  {  return false;  }

  double size_connection_bytes()   const
    {  return Generic::accumulate(ISEQ(inputs),0.0,std::mem_fun(&Input::size_bytes));  }

  double size_unique_connections() const
    {  return Generic::accumulate(ISEQ(inputs),0.0,std::mem_fun(&Input::size_conns));  }

protected:
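  /* Activation function applied to the (averaged) summed input activity;
     when none is supplied, the output is simply thresholded at zero. */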
  const OwningPointer<ActivationFunction> actfn;

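  /* One fixed-weight connection: its name, the constant weight kernel, the
     upstream region that supplies the activity, and the scale relating this
     region's coordinates to the input's. */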
  struct Input {
    Input(const string& name_i, const WeightMatrix& kernel_i,
          NeuralRegion* input_i, const Length size_scale_i)
      : name_str(name_i), kernel(kernel_i), input(input_i), size_scale(size_scale_i) { }
    const string& name() const {  return name_str;  }

    const double size_bytes() const {  return mat::size(kernel)*sizeof(WeightMatrix::value_type);  }
    const double size_conns() const {  return mat::size(kernel);  }

    string name_str;
    WeightMatrix kernel;
    NeuralRegion* input;
    Length size_scale;
  };

  inline Subscript input_row(const Input& input, Subscript row) const
    {  return Subscript(row*(input.size_scale)+(input.kernel.nrows()/2.0));  }

  inline Subscript input_col(const Input& input, Subscript col) const
    {  return Subscript(col*(input.size_scale)+(input.kernel.ncols()/2.0));  }

  typedef std::vector<Input*> inputs_type;

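  /* Offsets of this region's origin, taken from the Dimensions passed to
     the constructor (dims.xoffset and dims.yoffset). */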
  const Activity xo,yo;
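  /* The fixed-weight connections from upstream regions; owned by this
     object and deleted in the destructor. */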
  inputs_type            inputs;
};


#endif /* __FIXEDWTREGION_H__ */
