// File:        weightmatrix.cpp
// Description: Hides distinction between float/int implementations.
// Author:      Ray Smith
// Created:     Tue Jun 17 11:46:20 PST 2014
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "weightmatrix.h"

#include "dotproductavx.h"
#include "dotproductsse.h"
#include "intsimdmatrix.h"
#include "simddetect.h"
#include "statistc.h"
#include "tprintf.h"

namespace tesseract {

#if (defined(_MSC_VER) && _MSC_VER < 1900) || defined(ANDROID)
static inline double log2(double n) {
  return log(n) / log(2.0);
}
#endif  // _MSC_VER

// Number of iterations after which the correction effectively becomes unity.
const int kAdamCorrectionIterations = 200000;
// Epsilon in Adam to prevent division by zero.
const double kAdamEpsilon = 1e-8;
// Copies the whole input transposed, converted to double, into *this.
void TransposedArray::Transpose(const GENERIC_2D_ARRAY<double>& input) {
  int width = input.dim1();
  int num_features = input.dim2();
  ResizeNoInit(num_features, width);
  for (int t = 0; t < width; ++t) WriteStrided(t, input[t]);
}
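
// Illustrative sketch (not part of the original file): the contract of
// Transpose() above, restated with std::vector in place of the
// tesseract-internal GENERIC_2D_ARRAY/TransposedArray types, so the names
// below are hypothetical. After the copy, out[f][t] == in[t][f]: timestep t
// of the input becomes column t of the output, which is what WriteStrided()
// achieves one row at a time.
#include <vector>
static std::vector<std::vector<double>> TransposeSketch(
    const std::vector<std::vector<float>>& in) {
  int width = in.size();            // dim1: timesteps.
  int num_features = in[0].size();  // dim2: features per timestep.
  std::vector<std::vector<double>> out(num_features,
                                       std::vector<double>(width));
  for (int t = 0; t < width; ++t)
    for (int f = 0; f < num_features; ++f)
      out[f][t] = static_cast<double>(in[t][f]);  // Transposed, widened.
  return out;
}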

// Sets up the network for training. Initializes weights using weights of
// scale `range` picked according to the random number generator `randomizer`.
int WeightMatrix::InitWeightsFloat(int no, int ni, bool use_adam,
                                   float weight_range, TRand* randomizer) {
  int_mode_ = false;
  wf_.Resize(no, ni, 0.0);
  if (randomizer != nullptr) {
    for (int i = 0; i < no; ++i) {
      for (int j = 0; j < ni; ++j) {
        wf_[i][j] = randomizer->SignedRand(weight_range);
      }
    }
  }
  use_adam_ = use_adam;
  InitBackward();
  return ni * no;
}

// Changes the number of outputs to the size of the given code_map, copying
// the old weight matrix entries for each output from code_map[output] where
// non-negative, and using the mean (over all outputs) of the existing weights
// for all outputs with negative code_map entries. Returns the new number of
// weights.
int WeightMatrix::RemapOutputs(const std::vector<int>& code_map) {
  GENERIC_2D_ARRAY<double> old_wf(wf_);
  int old_no = wf_.dim1();
  int new_no = code_map.size();
  int ni = wf_.dim2();
  std::vector<double> means(ni, 0.0);
  for (int c = 0; c < old_no; ++c) {
    const double* weights = wf_[c];
    for (int i = 0; i < ni; ++i) means[i] += weights[i];
  }
  for (double& mean : means) mean /= old_no;
  wf_.ResizeNoInit(new_no, ni);
  InitBackward();
  for (int dest = 0; dest < new_no; ++dest) {
    int src = code_map[dest];
    const double* src_data = src >= 0 ? old_wf[src] : means.data();
    memcpy(wf_[dest], src_data, ni * sizeof(*src_data));
  }
  return ni * new_no;
}
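
// Illustrative sketch (not part of the original file): the remapping rule of
// RemapOutputs() above on plain std::vector rows (RemapSketch is a
// hypothetical name). Output row dest becomes a copy of old row
// code_map[dest] when that entry is non-negative, and the column-wise mean
// of all old rows otherwise.
#include <vector>
static std::vector<std::vector<double>> RemapSketch(
    const std::vector<std::vector<double>>& old_w,
    const std::vector<int>& code_map) {
  int ni = old_w[0].size();
  std::vector<double> means(ni, 0.0);
  for (const auto& row : old_w)
    for (int i = 0; i < ni; ++i) means[i] += row[i];
  for (double& mean : means) mean /= old_w.size();
  std::vector<std::vector<double>> new_w(code_map.size());
  for (size_t dest = 0; dest < code_map.size(); ++dest)
    new_w[dest] = code_map[dest] >= 0 ? old_w[code_map[dest]] : means;
  return new_w;
}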

// Converts a float network to an int network. Each set of input weights that
// corresponds to a single output weight is converted independently:
// Compute the max absolute value of the weight set.
// Scale so the max absolute value becomes INT8_MAX.
// Round to integer.
// Store a multiplicative scale factor (as a double) that will reproduce
// the original value, subject to rounding errors.
void WeightMatrix::ConvertToInt() {
  wi_.ResizeNoInit(wf_.dim1(), wf_.dim2());
  scales_.init_to_size(wi_.dim1(), 0.0);
  int dim2 = wi_.dim2();
  for (int t = 0; t < wi_.dim1(); ++t) {
    double* f_line = wf_[t];
    int8_t* i_line = wi_[t];
    double max_abs = 0.0;
    for (int f = 0; f < dim2; ++f) {
      double abs_val = fabs(f_line[f]);
      if (abs_val > max_abs) max_abs = abs_val;
    }
    double scale = max_abs / INT8_MAX;
    scales_[t] = scale;
    if (scale == 0.0) scale = 1.0;
    for (int f = 0; f < dim2; ++f) {
      i_line[f] = IntCastRounded(f_line[f] / scale);
    }
  }
  wf_.Resize(1, 1, 0.0);
  int_mode_ = true;
  multiplier_.reset(IntSimdMatrix::GetFastestMultiplier());
  if (multiplier_ != nullptr) multiplier_->Init(wi_);
}
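
// Illustrative sketch (not part of the original file): the per-row
// quantization performed by ConvertToInt() above, on a bare array
// (QuantizeRowSketch is a hypothetical name). The largest |w| in the row
// maps to INT8_MAX = 127, and the stored scale recovers each weight as
// scale * q[i], with error at most scale / 2.
#include <algorithm>
#include <cmath>
#include <cstdint>
static void QuantizeRowSketch(const double* w, int n, int8_t* q,
                              double* scale) {
  double max_abs = 0.0;
  for (int i = 0; i < n; ++i) max_abs = std::max(max_abs, std::fabs(w[i]));
  *scale = max_abs / 127;  // INT8_MAX.
  // Guard the all-zero row, exactly as the code above does.
  double divisor = *scale == 0.0 ? 1.0 : *scale;
  for (int i = 0; i < n; ++i)
    q[i] = static_cast<int8_t>(std::lround(w[i] / divisor));
}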

// Allocates any needed memory for running Backward, and zeroes the deltas,
// thus eliminating any existing momentum.
void WeightMatrix::InitBackward() {
  int no = int_mode_ ? wi_.dim1() : wf_.dim1();
  int ni = int_mode_ ? wi_.dim2() : wf_.dim2();
  dw_.Resize(no, ni, 0.0);
  updates_.Resize(no, ni, 0.0);
  wf_t_.Transpose(wf_);
  if (use_adam_) dw_sq_sum_.Resize(no, ni, 0.0);
}

// Flag on mode to indicate that this weightmatrix uses int8_t.
const int kInt8Flag = 1;
// Flag on mode to indicate that this weightmatrix uses adam.
const int kAdamFlag = 4;
// Flag on mode to indicate that this weightmatrix uses double. Set
// independently of kInt8Flag as even in int mode the scales can
// be float or double.
const int kDoubleFlag = 128;

// Writes to the given file. Returns false in case of error.
bool WeightMatrix::Serialize(bool training, TFile* fp) const {
  // For backward compatibility, add kDoubleFlag to mode to indicate the
  // doubles format, without errs, so we can detect and read old format
  // weight matrices.
  uint8_t mode =
      (int_mode_ ? kInt8Flag : 0) | (use_adam_ ? kAdamFlag : 0) | kDoubleFlag;
  if (fp->FWrite(&mode, sizeof(mode), 1) != 1) return false;
  if (int_mode_) {
    if (!wi_.Serialize(fp)) return false;
    if (!scales_.Serialize(fp)) return false;
  } else {
    if (!wf_.Serialize(fp)) return false;
    if (training && !updates_.Serialize(fp)) return false;
    if (training && use_adam_ && !dw_sq_sum_.Serialize(fp)) return false;
  }
  return true;
}
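
// Illustrative sketch (not part of the original file): how the mode byte
// written above packs the flags. An int8_t Adam-trained matrix in the
// current doubles format serializes as kInt8Flag | kAdamFlag | kDoubleFlag
// = 1 | 4 | 128 = 133, while files written before kDoubleFlag existed have
// bit 128 clear, which is how DeSerialize() below routes them to
// DeSerializeOld(). DecodeModeSketch is a hypothetical name.
#include <cstdint>
struct ModeBitsSketch {
  bool int_mode;
  bool use_adam;
  bool double_format;
};
static ModeBitsSketch DecodeModeSketch(uint8_t mode) {
  return {(mode & 1) != 0,      // kInt8Flag
          (mode & 4) != 0,      // kAdamFlag
          (mode & 128) != 0};   // kDoubleFlag
}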

// Reads from the given file. Returns false in case of error.
bool WeightMatrix::DeSerialize(bool training, TFile* fp) {
  uint8_t mode = 0;
  if (fp->FRead(&mode, sizeof(mode), 1) != 1) return false;
  int_mode_ = (mode & kInt8Flag) != 0;
  use_adam_ = (mode & kAdamFlag) != 0;
  if ((mode & kDoubleFlag) == 0) return DeSerializeOld(training, fp);
  if (int_mode_) {
    if (!wi_.DeSerialize(fp)) return false;
    if (!scales_.DeSerialize(fp)) return false;
    multiplier_.reset(IntSimdMatrix::GetFastestMultiplier());
    if (multiplier_ != nullptr) multiplier_->Init(wi_);
  } else {
    if (!wf_.DeSerialize(fp)) return false;
    if (training) {
      InitBackward();
      if (!updates_.DeSerialize(fp)) return false;
      if (use_adam_ && !dw_sq_sum_.DeSerialize(fp)) return false;
    }
  }
  return true;
}

// As DeSerialize, but reads an old (float) format WeightMatrix for
// backward compatibility.
bool WeightMatrix::DeSerializeOld(bool training, TFile* fp) {
  GENERIC_2D_ARRAY<float> float_array;
  if (int_mode_) {
    if (!wi_.DeSerialize(fp)) return false;
    GenericVector<float> old_scales;
    if (!old_scales.DeSerialize(fp)) return false;
    scales_.resize_no_init(old_scales.size());
    for (int i = 0; i < old_scales.size(); ++i) scales_[i] = old_scales[i];
  } else {
    if (!float_array.DeSerialize(fp)) return false;
    FloatToDouble(float_array, &wf_);
  }
  if (training) {
    InitBackward();
    if (!float_array.DeSerialize(fp)) return false;
    FloatToDouble(float_array, &updates_);
    // Errs was only used in int training, which is now dead.
    if (!float_array.DeSerialize(fp)) return false;
  }
  return true;
}

// Computes matrix.vector v = Wu.
// u is of size W.dim2() - 1 and the output v is of size W.dim1().
// u is imagined to have an extra element at the end with value 1, to
// implement the bias, but it doesn't actually have it.
// Asserts that the call matches what we have.
void WeightMatrix::MatrixDotVector(const double* u, double* v) const {
  ASSERT_HOST(!int_mode_);
  MatrixDotVectorInternal(wf_, true, false, u, v);
}

void WeightMatrix::MatrixDotVector(const int8_t* u, double* v) const {
  ASSERT_HOST(int_mode_);
  ASSERT_HOST(multiplier_ != nullptr);
  multiplier_->MatrixDotVector(wi_, scales_, u, v);
}

// MatrixDotVector for peep weights: MultiplyAccumulate adds the
// component-wise products of *this[0] and v to inout.
void WeightMatrix::MultiplyAccumulate(const double* v, double* inout) {
  ASSERT_HOST(!int_mode_);
  ASSERT_HOST(wf_.dim1() == 1);
  int n = wf_.dim2();
  const double* u = wf_[0];
  for (int i = 0; i < n; ++i) {
    inout[i] += u[i] * v[i];
  }
}

// Computes vector.matrix v = uW.
// u is of size W.dim1() and the output v is of size W.dim2() - 1.
// The last result is discarded, as v is assumed to have an imaginary
// last value of 1, as with MatrixDotVector.
void WeightMatrix::VectorDotMatrix(const double* u, double* v) const {
  ASSERT_HOST(!int_mode_);
  MatrixDotVectorInternal(wf_t_, false, true, u, v);
}

// Fills dw_[i][j] with the dot product u[i][] . v[j][], using elements from
// u and v. In terms of the neural network, u is the gradients and v is the
// inputs.
// Note that (matching MatrixDotVector) v[last][] is missing, presumed 1.0.
// Runs parallel if requested. Note that u and v must be transposed.
void WeightMatrix::SumOuterTransposed(const TransposedArray& u,
                                      const TransposedArray& v,
                                      bool in_parallel) {
  ASSERT_HOST(!int_mode_);
  int num_outputs = dw_.dim1();
  ASSERT_HOST(u.dim1() == num_outputs);
  ASSERT_HOST(u.dim2() == v.dim2());
  int num_inputs = dw_.dim2() - 1;
  int num_samples = u.dim2();
  // v is missing the last element in dim1.
  ASSERT_HOST(v.dim1() == num_inputs);
#ifdef _OPENMP
#pragma omp parallel for num_threads(4) if (in_parallel)
#endif
  for (int i = 0; i < num_outputs; ++i) {
    double* dwi = dw_[i];
    const double* ui = u[i];
    for (int j = 0; j < num_inputs; ++j) {
      dwi[j] = DotProduct(ui, v[j], num_samples);
    }
    // The last element of v is missing, presumed 1.0f.
    double total = 0.0;
    for (int k = 0; k < num_samples; ++k) total += ui[k];
    dwi[num_inputs] = total;
  }
}
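
// Illustrative sketch (not part of the original file): the arithmetic of
// SumOuterTransposed() above without the transposed containers or OpenMP
// (SumOuterSketch is a hypothetical name). Summed over the sample axis t,
// dw is the outer product of gradient rows u and input rows v; the extra
// bias column receives the plain sum of u because the missing last input is
// presumed to be 1.0.
static void SumOuterSketch(const double* u, const double* v, int num_outputs,
                           int num_inputs, int num_samples, double* dw) {
  // u: num_outputs x num_samples, v: num_inputs x num_samples,
  // dw: num_outputs x (num_inputs + 1), all row-major.
  for (int i = 0; i < num_outputs; ++i) {
    for (int j = 0; j < num_inputs; ++j) {
      double total = 0.0;
      for (int t = 0; t < num_samples; ++t)
        total += u[i * num_samples + t] * v[j * num_samples + t];
      dw[i * (num_inputs + 1) + j] = total;
    }
    double bias = 0.0;
    for (int t = 0; t < num_samples; ++t) bias += u[i * num_samples + t];
    dw[i * (num_inputs + 1) + num_inputs] = bias;  // Implicit input of 1.0.
  }
}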

// Updates the weights using the given learning rate and momentum.
// num_samples is the quotient to be used in the adam computation iff
// use_adam_ is true.
void WeightMatrix::Update(double learning_rate, double momentum,
                          double adam_beta, int num_samples) {
  ASSERT_HOST(!int_mode_);
  if (use_adam_ && num_samples > 0 && num_samples < kAdamCorrectionIterations) {
    learning_rate *= sqrt(1.0 - pow(adam_beta, num_samples));
    learning_rate /= 1.0 - pow(momentum, num_samples);
  }
  if (use_adam_ && num_samples > 0 && momentum > 0.0) {
    dw_sq_sum_.SumSquares(dw_, adam_beta);
    dw_ *= learning_rate * (1.0 - momentum);
    updates_ *= momentum;
    updates_ += dw_;
    wf_.AdamUpdate(updates_, dw_sq_sum_, learning_rate * kAdamEpsilon);
  } else {
    dw_ *= learning_rate;
    updates_ += dw_;
    if (momentum > 0.0) wf_ += updates_;
    if (momentum >= 0.0) updates_ *= momentum;
  }
  wf_t_.Transpose(wf_);
}
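
// Illustrative sketch (not part of the original file): a single-weight
// reading of the Adam branch above. The exact semantics of SumSquares() and
// AdamUpdate() live in GENERIC_2D_ARRAY (matrix.h); this sketch assumes they
// follow the textbook Adam accumulator and update, consistent with the bias
// correction applied to learning_rate before the branch. AdamStepSketch is a
// hypothetical name.
#include <cmath>
static void AdamStepSketch(double dw, double lr, double momentum, double beta,
                           double* update, double* sq_sum, double* w) {
  // Second-moment EMA of the raw gradient (assumed SumSquares semantics).
  *sq_sum = beta * *sq_sum + (1.0 - beta) * dw * dw;
  // First moment with momentum, matching the updates_ accumulation above.
  *update = momentum * *update + lr * (1.0 - momentum) * dw;
  // Scale by the RMS gradient; epsilon (passed above as
  // learning_rate * kAdamEpsilon) guards the division (assumed AdamUpdate
  // semantics).
  *w += *update / (std::sqrt(*sq_sum) + lr * kAdamEpsilon);
}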

// Adds the dw_ in other to the dw_ in *this.
void WeightMatrix::AddDeltas(const WeightMatrix& other) {
  ASSERT_HOST(dw_.dim1() == other.dw_.dim1());
  ASSERT_HOST(dw_.dim2() == other.dw_.dim2());
  dw_ += other.dw_;
}

// Sums the products of weight updates in *this and other, splitting into
// positive (same direction) in *same and negative (different direction) in
// *changed.
void WeightMatrix::CountAlternators(const WeightMatrix& other, double* same,
                                    double* changed) const {
  int num_outputs = updates_.dim1();
  int num_inputs = updates_.dim2();
  ASSERT_HOST(num_outputs == other.updates_.dim1());
  ASSERT_HOST(num_inputs == other.updates_.dim2());
  for (int i = 0; i < num_outputs; ++i) {
    const double* this_i = updates_[i];
    const double* other_i = other.updates_[i];
    for (int j = 0; j < num_inputs; ++j) {
      double product = this_i[j] * other_i[j];
      if (product < 0.0)
        *changed -= product;
      else
        *same += product;
    }
  }
}
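
// Illustrative sketch (not part of the original file): what
// CountAlternators() above accumulates, on three hand-picked update pairs
// (CountAlternatorsSketch is a hypothetical name). Products of matching
// entries are split by sign, so both outputs grow as positive magnitudes.
static void CountAlternatorsSketch() {
  const double a[] = {0.2, -0.1, 0.3};
  const double b[] = {0.1, 0.1, -0.3};
  double same = 0.0, changed = 0.0;
  for (int j = 0; j < 3; ++j) {
    double product = a[j] * b[j];
    if (product < 0.0)
      changed -= product;  // Opposite directions: -(-0.01), -(-0.09).
    else
      same += product;  // Same direction: 0.02.
  }
  // Result: same == 0.02, changed == 0.10.
}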

// Helper computes an integer histogram bucket for a weight and adds it
// to the histogram.
const int kHistogramBuckets = 16;
static void HistogramWeight(double weight, STATS* histogram) {
  int bucket = kHistogramBuckets - 1;
  if (weight != 0.0) {
    double logval = -log2(fabs(weight));
    bucket = ClipToRange(IntCastRounded(logval), 0, kHistogramBuckets - 1);
  }
  histogram->add(bucket, 1);
}
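
// Illustrative sketch (not part of the original file): the bucketing rule of
// HistogramWeight() above on a few concrete magnitudes
// (HistogramBucketsSketch is a hypothetical name). Bucket b collects weights
// with |w| near 2^-b, so bucket 0 holds magnitudes around 1 and the last
// bucket holds zeros and anything below about 2^-15.
#include <algorithm>
#include <cmath>
#include <cstdio>
static void HistogramBucketsSketch() {
  const double samples[] = {1.0, 0.25, 0.01, 1e-6, 0.0};
  for (double w : samples) {
    int bucket = kHistogramBuckets - 1;  // Catch-all for zero.
    if (w != 0.0) {
      int b = static_cast<int>(std::lround(-std::log2(std::fabs(w))));
      bucket = std::min(std::max(b, 0), kHistogramBuckets - 1);
    }
    printf("|w| = %-8g -> bucket %d\n", w, bucket);
  }
  // Prints buckets 0, 2, 7, 15, and 15 respectively.
}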

void WeightMatrix::Debug2D(const char* msg) {
  STATS histogram(0, kHistogramBuckets);
  if (int_mode_) {
    for (int i = 0; i < wi_.dim1(); ++i) {
      for (int j = 0; j < wi_.dim2(); ++j) {
        HistogramWeight(wi_[i][j] * scales_[i], &histogram);
      }
    }
  } else {
    for (int i = 0; i < wf_.dim1(); ++i) {
      for (int j = 0; j < wf_.dim2(); ++j) {
        HistogramWeight(wf_[i][j], &histogram);
      }
    }
  }
  tprintf("%s\n", msg);
  histogram.print();
}

// Computes and returns the dot product of the two n-vectors u and v.
/* static */
double WeightMatrix::DotProduct(const double* u, const double* v, int n) {
  // Note: because the order of addition is different among the 3 DotProduct
  // functions, the results can (and do) vary slightly (although they agree
  // to within about 4e-15). This produces different results when running
  // training, despite all random inputs being precisely equal.
  // To get consistent results, use just one of these DotProduct functions.
  // On a test multi-layer network, serial is 57% slower than sse, and avx
  // is about 8% faster than sse. This suggests that the time is memory
  // bandwidth constrained and could benefit from holding the reused vector
  // in AVX registers.
  if (SIMDDetect::IsAVXAvailable()) return DotProductAVX(u, v, n);
  if (SIMDDetect::IsSSEAvailable()) return DotProductSSE(u, v, n);
  double total = 0.0;
  for (int k = 0; k < n; ++k) total += u[k] * v[k];
  return total;
}

// Utility function converts an array of float to the corresponding array
// of double.
/* static */
void WeightMatrix::FloatToDouble(const GENERIC_2D_ARRAY<float>& wf,
                                 GENERIC_2D_ARRAY<double>* wd) {
  int dim1 = wf.dim1();
  int dim2 = wf.dim2();
  wd->ResizeNoInit(dim1, dim2);
  for (int i = 0; i < dim1; ++i) {
    const float* wfi = wf[i];
    double* wdi = (*wd)[i];
    for (int j = 0; j < dim2; ++j) wdi[j] = static_cast<double>(wfi[j]);
  }
}

// Computes matrix.vector v = Wu.
// u is of size W.dim2() - add_bias_fwd and the output v is of size
// W.dim1() - skip_bias_back.
// If add_bias_fwd, u is imagined to have an extra element at the end with
// value 1, to implement the bias weight.
// If skip_bias_back, we are actually performing the backwards product on a
// transposed matrix, so we need to drop the v output corresponding to the last
// element in dim1.
void WeightMatrix::MatrixDotVectorInternal(const GENERIC_2D_ARRAY<double>& w,
                                           bool add_bias_fwd,
                                           bool skip_bias_back, const double* u,
                                           double* v) {
  int num_results = w.dim1() - skip_bias_back;
  int extent = w.dim2() - add_bias_fwd;
  for (int i = 0; i < num_results; ++i) {
    const double* wi = w[i];
    double total = DotProduct(wi, u, extent);
    if (add_bias_fwd) total += wi[extent];  // The bias value.
    v[i] = total;
  }
}

}  // namespace tesseract.