comparison mupdf-source/thirdparty/tesseract/src/lstm/parallel.cpp @ 2:b50eed0cc0ef upstream

ADD: MuPDF v1.26.7: the MuPDF source as downloaded by a default build of PyMuPDF 1.26.4. The directory name has changed: there is no longer a version number in the expanded directory name.
author Franz Glasner <fzglas.hg@dom66.de>
date Mon, 15 Sep 2025 11:43:07 +0200
comparison: 1:1d09e1dec1d9 vs 2:b50eed0cc0ef
/////////////////////////////////////////////////////////////////////////
// File: parallel.cpp
// Description: Runs networks in parallel on the same input.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////

#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif

#include "parallel.h"

#ifdef _OPENMP
# include <omp.h>
#endif

#include "functions.h" // For conditional undef of _OPENMP.
#include "networkscratch.h"

namespace tesseract {

// ni_ and no_ will be set by AddToStack.
Parallel::Parallel(const std::string &name, NetworkType type) : Plumbing(name) {
  type_ = type;
}

// Returns the shape output from the network given an input shape (which may
// be partially unknown ie zero).
StaticShape Parallel::OutputShape(const StaticShape &input_shape) const {
  StaticShape result = stack_[0]->OutputShape(input_shape);
  int stack_size = stack_.size();
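  // All components run on the same input: the width and height of the result
  // come from the first component, and each further component only adds its
  // depth, since the component outputs are concatenated feature-wise.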
  for (int i = 1; i < stack_size; ++i) {
    StaticShape shape = stack_[i]->OutputShape(input_shape);
    result.set_depth(result.depth() + shape.depth());
  }
  return result;
}

// Runs forward propagation of activations on the input line.
// See NetworkCpp for a detailed discussion of the arguments.
void Parallel::Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
                       NetworkScratch *scratch, NetworkIO *output) {
  bool parallel_debug = false;
  // If this parallel is a replicator of convolvers, or holds a 1-d LSTM pair,
  // or a 2-d LSTM quad, do debug locally, and don't pass the flag on.
  if (debug && type_ != NT_PARALLEL) {
    parallel_debug = true;
    debug = false;
  }
  int stack_size = stack_.size();
  if (type_ == NT_PAR_2D_LSTM) {
    // Special case, run parallel in parallel.
    std::vector<NetworkScratch::IO> results(stack_size);
    for (int i = 0; i < stack_size; ++i) {
      results[i].Resize(input, stack_[i]->NumOutputs(), scratch);
    }
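    // Each component has its own scratch output buffer above, so the members
    // of the 2-d LSTM quad can run on separate OpenMP threads without sharing
    // a destination.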
#ifdef _OPENMP
# pragma omp parallel for num_threads(stack_size)
#endif
    for (int i = 0; i < stack_size; ++i) {
      stack_[i]->Forward(debug, input, nullptr, scratch, results[i]);
    }
    // Now pack all the results (serially) into the output.
    int out_offset = 0;
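    // CopyPacking writes each component's features starting at the current
    // depth offset, so the output ends up as the feature-wise concatenation
    // of all the component outputs.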
    output->Resize(*results[0], NumOutputs());
    for (int i = 0; i < stack_size; ++i) {
      out_offset = output->CopyPacking(*results[i], out_offset);
    }
  } else {
    // Revolving intermediate result.
    NetworkScratch::IO result(input, scratch);
    // Source for divided replicated.
    NetworkScratch::IO source_part;
    TransposedArray *src_transpose = nullptr;
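    // When training a replicated stack, the input is transposed once here and
    // the same transposed copy is handed to every component, rather than each
    // component transposing it for itself.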
    if (IsTraining() && type_ == NT_REPLICATED) {
      // Make a transposed copy of the input.
      input.Transpose(&transposed_input_);
      src_transpose = &transposed_input_;
    }
    // Run each network, putting the outputs into result.
    int out_offset = 0;
    for (int i = 0; i < stack_size; ++i) {
      stack_[i]->Forward(debug, input, src_transpose, scratch, result);
      // All networks must have the same output width
      if (i == 0) {
        output->Resize(*result, NumOutputs());
      } else {
        ASSERT_HOST(result->Width() == output->Width());
      }
      out_offset = output->CopyPacking(*result, out_offset);
    }
  }
#ifndef GRAPHICS_DISABLED
  if (parallel_debug) {
    DisplayForward(*output);
  }
#endif
}

// Runs backward propagation of errors on the deltas line.
// See NetworkCpp for a detailed discussion of the arguments.
bool Parallel::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                        NetworkIO *back_deltas) {
  // If this parallel is a replicator of convolvers, or holds a 1-d LSTM pair,
  // or a 2-d LSTM quad, do debug locally, and don't pass the flag on.
  if (debug && type_ != NT_PARALLEL) {
#ifndef GRAPHICS_DISABLED
    DisplayBackward(fwd_deltas);
#endif
    debug = false;
  }
  auto stack_size = stack_.size();
  if (type_ == NT_PAR_2D_LSTM) {
    // Special case, run parallel in parallel.
    std::vector<NetworkScratch::IO> in_deltas(stack_size);
    std::vector<NetworkScratch::IO> out_deltas(stack_size);
    // Split the forward deltas for each stack element.
    int feature_offset = 0;
    for (unsigned i = 0; i < stack_.size(); ++i) {
      int num_features = stack_[i]->NumOutputs();
      in_deltas[i].Resize(fwd_deltas, num_features, scratch);
      out_deltas[i].Resize(fwd_deltas, stack_[i]->NumInputs(), scratch);
      in_deltas[i]->CopyUnpacking(fwd_deltas, feature_offset, num_features);
      feature_offset += num_features;
    }
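    // Each in_deltas[i] now holds just the band of features that component i
    // contributed in Forward (CopyUnpacking is the inverse of the CopyPacking
    // done there), so the backward passes can run on separate threads.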
#ifdef _OPENMP
# pragma omp parallel for num_threads(stack_size)
#endif
    for (unsigned i = 0; i < stack_size; ++i) {
      stack_[i]->Backward(debug, *in_deltas[i], scratch, i == 0 ? back_deltas : out_deltas[i]);
    }
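    // Component 0 wrote its input deltas straight into back_deltas; the other
    // components' deltas are summed into it only if something below this
    // network still needs them.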
    if (needs_to_backprop_) {
      for (unsigned i = 1; i < stack_size; ++i) {
        back_deltas->AddAllToFloat(*out_deltas[i]);
      }
    }
  } else {
    // Revolving partial deltas.
    NetworkScratch::IO in_deltas(fwd_deltas, scratch);
    // The sum of deltas from different sources, which will eventually go into
    // back_deltas.
    NetworkScratch::IO out_deltas;
    int feature_offset = 0;
    for (unsigned i = 0; i < stack_.size(); ++i) {
      int num_features = stack_[i]->NumOutputs();
      in_deltas->CopyUnpacking(fwd_deltas, feature_offset, num_features);
      feature_offset += num_features;
      if (stack_[i]->Backward(debug, *in_deltas, scratch, back_deltas)) {
        if (i == 0) {
          out_deltas.ResizeFloat(*back_deltas, back_deltas->NumFeatures(), scratch);
          out_deltas->CopyAll(*back_deltas);
        } else if (back_deltas->NumFeatures() == out_deltas->NumFeatures()) {
          // Widths are allowed to be different going back, as we may have
          // input nets, so only accumulate the deltas if the widths are the
          // same.
          out_deltas->AddAllToFloat(*back_deltas);
        }
      }
    }
    if (needs_to_backprop_) {
      back_deltas->CopyAll(*out_deltas);
    }
  }
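  // Average the accumulated deltas over the number of parallel components so
  // that the magnitude of the gradient passed downwards does not grow with
  // the size of the stack.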
  if (needs_to_backprop_) {
    back_deltas->ScaleFloatBy(1.0f / stack_size);
  }
  return needs_to_backprop_;
}

} // namespace tesseract.