comparison mupdf-source/thirdparty/zxing-cpp/core/src/Pattern.h @ 2:b50eed0cc0ef upstream
ADD: MuPDF v1.26.7: the MuPDF source as downloaded by a default build of PyMuPDF 1.26.4.
The directory name has changed: the expanded directory no longer includes a version number.
| author | Franz Glasner <fzglas.hg@dom66.de> |
|---|---|
| date | Mon, 15 Sep 2025 11:43:07 +0200 |
| parents | |
| children | |
Revisions compared: 1:1d09e1dec1d9 and 2:b50eed0cc0ef. The full contents of Pattern.h follow.
```cpp
/*
* Copyright 2020 Axel Waggershauser
*/
// SPDX-License-Identifier: Apache-2.0

#pragma once

#include "BitHacks.h"
#include "Range.h"
#include "ZXAlgorithms.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>

namespace ZXing {

using PatternType = uint16_t;
template<int N> using Pattern = std::array<PatternType, N>;
using PatternRow = std::vector<PatternType>;

class PatternView
{
	using Iterator = PatternRow::const_pointer;
	Iterator _data = nullptr;
	int _size = 0;
	Iterator _base = nullptr;
	Iterator _end = nullptr;

public:
	using value_type = PatternRow::value_type;

	PatternView() = default;

	// A PatternRow always starts with the width of whitespace in front of the first black bar.
	// The first element of the PatternView is the first bar.
	PatternView(const PatternRow& bars)
		: _data(bars.data() + 1), _size(Size(bars) - 1), _base(bars.data()), _end(bars.data() + bars.size())
	{}

	PatternView(Iterator data, int size, Iterator base, Iterator end) : _data(data), _size(size), _base(base), _end(end) {}

	template <size_t N>
	PatternView(const Pattern<N>& row) : _data(row.data()), _size(N)
	{}

	Iterator data() const { return _data; }
	Iterator begin() const { return _data; }
	Iterator end() const { return _data + _size; }

	value_type operator[](int i) const
	{
		// assert(i < _count);
		return _data[i];
	}

	int sum(int n = 0) const { return Reduce(_data, _data + (n == 0 ? _size : n)); }
	int size() const { return _size; }

	// index is the number of bars and spaces from the first bar to the current position
	int index() const { return narrow_cast<int>(_data - _base) - 1; }
	int pixelsInFront() const { return Reduce(_base, _data); }
	int pixelsTillEnd() const { return Reduce(_base, _data + _size) - 1; }
	bool isAtFirstBar() const { return _data == _base + 1; }
	bool isAtLastBar() const { return _data + _size == _end - 1; }
	bool isValid(int n) const { return _data && _data >= _base && _data + n <= _end; }
	bool isValid() const { return isValid(size()); }

	template<bool acceptIfAtFirstBar = false>
	bool hasQuietZoneBefore(float scale) const
	{
		return (acceptIfAtFirstBar && isAtFirstBar()) || _data[-1] >= sum() * scale;
	}

	template<bool acceptIfAtLastBar = true>
	bool hasQuietZoneAfter(float scale) const
	{
		return (acceptIfAtLastBar && isAtLastBar()) || _data[_size] >= sum() * scale;
	}

	PatternView subView(int offset, int size = 0) const
	{
		// if(std::abs(size) > count())
		// 	printf("%d > %d\n", std::abs(size), _count);
		// assert(std::abs(size) <= count());
		if (size == 0)
			size = _size - offset;
		else if (size < 0)
			size = _size - offset + size;
		return {begin() + offset, std::max(size, 0), _base, _end};
	}

	bool shift(int n)
	{
		return _data && ((_data += n) + _size <= _end);
	}

	bool skipPair()
	{
		return shift(2);
	}

	bool skipSymbol()
	{
		return shift(_size);
	}

	bool skipSingle(int maxWidth)
	{
		return shift(1) && _data[-1] <= maxWidth;
	}

	void extend()
	{
		_size = std::max(0, narrow_cast<int>(_end - _data));
	}
};
```
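To make the indexing conventions above concrete, here is a minimal sketch; it is not part of the file, the wrapper function and the widths are made up, and the leading and trailing `0` entries stand for the whitespace runs that a PatternRow carries at both ends:

```cpp
#include "Pattern.h"

// Hypothetical helper, only to exercise the PatternView API shown above.
void patternViewSketch()
{
	// 0 white pixels, then bar/space widths 1,1,3,1,1, then 0 white pixels.
	ZXing::PatternRow row = {0, 1, 1, 3, 1, 1, 0};
	ZXing::PatternView view(row);        // the view starts at the first bar (width 1)

	int first = view[0];                 // 1: width of the first bar
	int width = view.sum(5);             // 1 + 1 + 3 + 1 + 1 == 7 pixels
	auto mid = view.subView(2, 3);       // 3-element window over the widths 3, 1, 1
	bool atStart = view.isAtFirstBar();  // true: nothing has been skipped yet
	(void)first; (void)width; (void)mid; (void)atStart;
}
```

The header continues with the BarAndSpace helpers: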
```cpp
/**
 * @brief The BarAndSpace struct is a simple 2 element data structure to hold information about bar(s) and space(s).
 *
 * The operator[](int) can be used in combination with a PatternView
 */
template <typename T>
struct BarAndSpace
{
	using value_type = T;
	T bar = {}, space = {};
	// even index -> bar, odd index -> space
	constexpr T& operator[](int i) noexcept { return reinterpret_cast<T*>(this)[i & 1]; }
	constexpr T operator[](int i) const noexcept { return reinterpret_cast<const T*>(this)[i & 1]; }
	bool isValid() const { return bar != T{} && space != T{}; }
};

using BarAndSpaceI = BarAndSpace<PatternType>;

template <int LEN, typename RT, typename T>
constexpr auto BarAndSpaceSum(const T* view) noexcept
{
	BarAndSpace<RT> res;
	for (int i = 0; i < LEN; ++i)
		res[i] += view[i];
	return res;
}
```
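The even/odd indexing convention is easiest to see in a short, equally hypothetical snippet (the accumulator and the widths are invented for illustration):

```cpp
#include "Pattern.h"

void barAndSpaceSketch()
{
	// Even indices address the bar field, odd indices the space field (i & 1).
	ZXing::BarAndSpace<int> acc;
	acc[0] += 3; // bar
	acc[1] += 2; // space
	acc[2] += 4; // bar again, so acc.bar == 7, acc.space == 2, acc.isValid() == true

	// BarAndSpaceSum splits a run of widths the same way: bars 1+3+1 == 5, spaces 1+1 == 2.
	ZXing::PatternRow row = {0, 1, 1, 3, 1, 1, 0};
	auto sums = ZXing::BarAndSpaceSum<5, int>(row.data() + 1);
	(void)acc; (void)sums;
}
```

Next come the compile-time patterns and the matching helpers: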
```cpp
/**
 * @brief FixedPattern describes a compile-time constant (start/stop) pattern.
 *
 * N = number of bars/spaces
 * SUM = sum over all N elements (size of pattern in modules)
 * IS_SPARCE = whether or not the pattern contains '0's denoting 'wide' bars/spaces
 */
template <int N, int SUM, bool IS_SPARCE = false>
struct FixedPattern
{
	using value_type = PatternRow::value_type;
	value_type _data[N];
	constexpr value_type operator[](int i) const noexcept { return _data[i]; }
	constexpr const value_type* data() const noexcept { return _data; }
	constexpr int size() const noexcept { return N; }
	constexpr BarAndSpace<value_type> sums() const noexcept { return BarAndSpaceSum<N, value_type>(_data); }
};

template <int N, int SUM>
using FixedSparcePattern = FixedPattern<N, SUM, true>;

template <bool E2E = false, int LEN, int SUM>
double IsPattern(const PatternView& view, const FixedPattern<LEN, SUM, false>& pattern, int spaceInPixel = 0,
				 double minQuietZone = 0, double moduleSizeRef = 0)
{
	if constexpr (E2E) {
		auto widths = BarAndSpaceSum<LEN, double>(view.data());
		auto sums = pattern.sums();
		BarAndSpace<double> modSize = {widths[0] / sums[0], widths[1] / sums[1]};

		auto [m, M] = std::minmax(modSize[0], modSize[1]);
		if (M > 4 * m) // make sure module sizes of bars and spaces are not too far away from each other
			return 0;

		if (minQuietZone && spaceInPixel < minQuietZone * modSize.space)
			return 0;

		const BarAndSpace<double> thr = {modSize[0] * .75 + .5, modSize[1] / (2 + (LEN < 6)) + .5};

		for (int x = 0; x < LEN; ++x)
			if (std::abs(view[x] - pattern[x] * modSize[x]) > thr[x])
				return 0;

		return (modSize[0] + modSize[1]) / 2;
	}

	double width = view.sum(LEN);
	if (SUM > LEN && width < SUM)
		return 0;

	const auto moduleSize = width / SUM;

	if (minQuietZone && spaceInPixel < minQuietZone * moduleSize - 1)
		return 0;

	if (!moduleSizeRef)
		moduleSizeRef = moduleSize;

	// the offset of 0.5 is to make the code less sensitive to quantization errors for small (near 1) module sizes.
	// TODO: review once we have upsampling in the binarizer in place.
	const auto threshold = moduleSizeRef * (0.5 + E2E * 0.25) + 0.5;

	for (int x = 0; x < LEN; ++x)
		if (std::abs(view[x] - pattern[x] * moduleSizeRef) > threshold)
			return 0;

	return moduleSize;
}

template <bool RELAXED_THRESHOLD = false, int N, int SUM>
double IsPattern(const PatternView& view, const FixedPattern<N, SUM, true>& pattern, int spaceInPixel = 0,
				 double minQuietZone = 0, double moduleSizeRef = 0)
{
	// pattern contains the indices with the bars/spaces that need to be equally wide
	double width = 0;
	for (int x = 0; x < SUM; ++x)
		width += view[pattern[x]];

	const auto moduleSize = width / SUM;

	if (minQuietZone && spaceInPixel < minQuietZone * moduleSize - 1)
		return 0;

	if (!moduleSizeRef)
		moduleSizeRef = moduleSize;

	// the offset of 0.5 is to make the code less sensitive to quantization errors for small (near 1) module sizes.
	// TODO: review once we have upsampling in the binarizer in place.
	const auto threshold = moduleSizeRef * (0.5 + RELAXED_THRESHOLD * 0.25) + 0.5;

	for (int x = 0; x < SUM; ++x)
		if (std::abs(view[pattern[x]] - moduleSizeRef) > threshold)
			return 0;

	return moduleSize;
}

template <int N, int SUM, bool IS_SPARCE>
bool IsRightGuard(const PatternView& view, const FixedPattern<N, SUM, IS_SPARCE>& pattern, double minQuietZone,
				  double moduleSizeRef = 0)
{
	int spaceInPixel = view.isAtLastBar() ? std::numeric_limits<int>::max() : *view.end();
	return IsPattern(view, pattern, spaceInPixel, minQuietZone, moduleSizeRef) != 0;
}

template<int LEN, typename Pred>
PatternView FindLeftGuard(const PatternView& view, int minSize, Pred isGuard)
{
	if (view.size() < minSize)
		return {};

	auto window = view.subView(0, LEN);
	if (window.isAtFirstBar() && isGuard(window, std::numeric_limits<int>::max()))
		return window;
	for (auto end = view.end() - minSize; window.data() < end; window.skipPair())
		if (isGuard(window, window[-1]))
			return window;

	return {};
}

template <int LEN, int SUM, bool IS_SPARCE>
PatternView FindLeftGuard(const PatternView& view, int minSize, const FixedPattern<LEN, SUM, IS_SPARCE>& pattern,
						  double minQuietZone)
{
	return FindLeftGuard<LEN>(view, std::max(minSize, LEN),
							  [&pattern, minQuietZone](const PatternView& window, int spaceInPixel) {
								  return IsPattern(window, pattern, spaceInPixel, minQuietZone);
							  });
}
```
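How these pieces combine in a decoder is sketched below; the 1-1-1 guard, the pixel widths, and the wrapper function are invented for illustration and do not appear in the file:

```cpp
#include "Pattern.h"

void guardSketch()
{
	// Hypothetical guard: three elements of one module each (N == 3, SUM == 3).
	constexpr ZXing::FixedPattern<3, 3> START_GUARD = {1, 1, 1};

	// Hypothetical row: 4 white pixels, then bar/space widths of about 2 pixels per module.
	ZXing::PatternRow row = {4, 2, 2, 2, 6, 2, 2, 2, 4};
	ZXing::PatternView view(row);

	// IsPattern returns the estimated module size (about 2 here) or 0 on a mismatch.
	double mod = ZXing::IsPattern(view.subView(0, 3), START_GUARD, row[0], /*minQuietZone=*/1);

	// FindLeftGuard scans for the first matching window with a sufficient quiet zone.
	auto guard = ZXing::FindLeftGuard<3>(view, 3, START_GUARD, /*minQuietZone=*/1);
	bool found = guard.isValid();
	(void)mod; (void)found;
}
```

The header continues with the normalization helpers: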
```cpp
template <int LEN>
std::array<int, LEN - 2> NormalizedE2EPattern(const PatternView& view, int mods, bool reverse = false)
{
	double moduleSize = static_cast<double>(view.sum(LEN)) / mods;
	std::array<int, LEN - 2> e2e;

	for (int i = 0; i < LEN - 2; i++) {
		int i_v = reverse ? LEN - 2 - i : i;
		double v = (view[i_v] + view[i_v + 1]) / moduleSize;
		e2e[i] = int(v + .5);
	}

	return e2e;
}

template <int LEN, int SUM>
std::array<int, LEN> NormalizedPattern(const PatternView& view)
{
	double moduleSize = static_cast<double>(view.sum(LEN)) / SUM;
#if 1
	int err = SUM;
	std::array<int, LEN> is;
	std::array<double, LEN> rs;
	for (int i = 0; i < LEN; i++) {
		double v = view[i] / moduleSize;
		is[i] = int(v + .5);
		rs[i] = v - is[i];
		err -= is[i];
	}

	if (std::abs(err) > 1)
		return {};

	if (err) {
		auto mi = err > 0 ? std::max_element(std::begin(rs), std::end(rs)) - std::begin(rs)
						  : std::min_element(std::begin(rs), std::end(rs)) - std::begin(rs);
		is[mi] += err;
		rs[mi] -= err;
	}
#else
	std::array<int, LEN> is, e2e;
	int min_v = view[0], min_i = 0;

	for (int i = 1; i < LEN; i++) {
		double v = (view[i - 1] + view[i]) / moduleSize;
		e2e[i] = int(v + .5);
		if (view[i] < min_v) {
			min_v = view[i];
			min_i = i;
		}
	}

	is[min_i] = 1;
	for (int i = min_i + 1; i < LEN; ++i)
		is[i] = e2e[i] - is[i - 1];
	for (int i = min_i - 1; i >= 0; --i)
		is[i] = e2e[i + 1] - is[i + 1];
#endif
	return is;
}
```
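As a small worked example (again invented, not from the file): with widths of roughly four pixels per module, the helpers above recover the module counts and edge-to-edge sums directly:

```cpp
#include "Pattern.h"

void normalizeSketch()
{
	// Hypothetical widths at 4 pixels per module: modules 1,1,3,1,1 (SUM == 7).
	ZXing::PatternRow row = {0, 4, 4, 12, 4, 4, 0};
	ZXing::PatternView view(row);

	auto mods = ZXing::NormalizedPattern<5, 7>(view);    // {1, 1, 3, 1, 1}
	auto e2e = ZXing::NormalizedE2EPattern<5>(view, 7);  // edge-to-edge sums: {2, 4, 4}
	(void)mods; (void)e2e;
}
```

The header closes with the row-to-pattern conversion: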
```cpp
template<typename I>
void GetPatternRow(Range<I> b_row, PatternRow& p_row)
{
	// TODO: if reactivating the bit-packed array (!ZX_FAST_BIT_STORAGE) should be of interest then the following code could be
	// considerably sped up by using a specialized variant along the lines of the old BitArray::getNextSetTo() function that
	// was removed between 1.4 and 2.0.

#if 0
	p_row.reserve(64);
	p_row.clear();

	auto lastPos = b_row.begin();
	if (*lastPos)
		p_row.push_back(0); // first value is number of white pixels, here 0

	for (auto p = b_row.begin() + 1; p < b_row.end(); ++p)
		if (bool(*p) != bool(*lastPos))
			p_row.push_back(p - std::exchange(lastPos, p));

	p_row.push_back(b_row.end() - lastPos);

	if (*lastPos)
		p_row.push_back(0); // last value is number of white pixels, here 0
#else
	p_row.resize(b_row.size() + 2);
	std::fill(p_row.begin(), p_row.end(), 0);

	auto bitPos = b_row.begin();
	const auto bitPosEnd = b_row.end();
	auto intPos = p_row.data();

	if (*bitPos)
		intPos++; // first value is number of white pixels, here 0

	// The following code has been observed to cause a speedup of up to 30% on large images on an AVX CPU
	// and on a Google Pixel 3 Android phone. Your mileage may vary.
	if constexpr (std::is_pointer_v<I> && sizeof(I) == 8 && sizeof(std::remove_pointer_t<I>) == 1) {
		using simd_t = uint64_t;
		while (bitPos < bitPosEnd - sizeof(simd_t)) {
			auto asSimd0 = BitHacks::LoadU<simd_t>(bitPos);
			auto asSimd1 = BitHacks::LoadU<simd_t>(bitPos + 1);
			auto z = asSimd0 ^ asSimd1;
			if (z) {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
				int step = BitHacks::NumberOfTrailingZeros(z) / 8 + 1;
#else
				int step = BitHacks::NumberOfLeadingZeros(z) / 8 + 1;
#endif
				(*intPos++) += step;
				bitPos += step;
			} else {
				(*intPos) += sizeof(simd_t);
				bitPos += sizeof(simd_t);
			}
		}
	}

	while (++bitPos != bitPosEnd) {
		++(*intPos);
		intPos += bitPos[0] != bitPos[-1];
	}
	++(*intPos);

	if (bitPos[-1])
		intPos++;

	p_row.resize(intPos - p_row.data() + 1);
#endif
}

} // ZXing
```
