RawSpeed
fast raw decoding library
Loading...
Searching...
No Matches
IiqDecoder.cpp
Go to the documentation of this file.
1/*
2 RawSpeed - RAW file decoder.
3
4 Copyright (C) 2009-2014 Klaus Post
 Copyright (C) 2014-2015 Pedro Côrte-Real
6 Copyright (C) 2017-2019 Roman Lebedev
7 Copyright (C) 2019 Robert Bridge
8
9 This library is free software; you can redistribute it and/or
10 modify it under the terms of the GNU Lesser General Public
11 License as published by the Free Software Foundation; either
12 version 2 of the License, or (at your option) any later version.
13
14 This library is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 Lesser General Public License for more details.
18
19 You should have received a copy of the GNU Lesser General Public
20 License along with this library; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22*/
23
24#include "decoders/IiqDecoder.h"
25#include "adt/Array2DRef.h"
26#include "adt/Casts.h"
27#include "adt/Mutex.h"
28#include "adt/Optional.h"
29#include "adt/Point.h"
30#include "common/Common.h"
31#include "common/RawImage.h"
32#include "common/Spline.h"
33#include "decoders/RawDecoder.h"
36#include "io/Buffer.h"
37#include "io/ByteStream.h"
38#include "io/Endianness.h"
39#include "metadata/Camera.h"
42#include "tiff/TiffIFD.h"
43#include <algorithm>
44#include <array>
45#include <cassert>
46#include <cinttypes>
47#include <cmath>
48#include <cstdint>
49#include <cstdlib>
50#include <functional>
51#include <iterator>
52#include <memory>
53#include <string>
54#include <utility>
55#include <vector>
56
57namespace rawspeed {
58
// Checks whether this buffer looks like an IIQ raw.
// NOTE(review): the signature line (per the class reference,
// `static bool IiqDecoder::isAppropriateDecoder(Buffer file)`) is
// missing from this extraction — confirm against the repository.
  const DataBuffer db(file, Endianness::little);

  // The IIQ magic. Is present for all IIQ raws.
  // 0x49494949 is "IIII" in ASCII when read little-endian.
  return db.get<uint32_t>(8) == 0x49494949;
}
65
// NOTE(review): the signature line of this overload and the line
// carrying the `return` keyword are missing from this extraction —
// confirm against the repository before relying on this listing.
  const auto id = rootIFD->getID();
  const std::string& make = id.make;

  // Accept all three historical Phase One / Leaf maker spellings.
         (make == "Phase One A/S" || make == "Phase One" || make == "Leaf");
}
73
// FIXME: this is very close to SamsungV0Decompressor::computeStripes()
std::vector<PhaseOneStrip>
IiqDecoder::computeSripes(Buffer raw_data, std::vector<IiqOffset> offsets,
                          uint32_t height) {
  assert(height > 0);
  // One offset per row, plus the sentinel "end" offset the caller appends.
  assert(offsets.size() == (1 + height));

  // NOTE(review): a line is missing here in this extraction; `bs` used
  // below is presumably a ByteStream over `raw_data` — confirm against
  // the repository.

  // so... here's the thing. offsets are not guaranteed to be in
  // monotonically increasing order. so for each element of 'offsets',
  // we need to find element which specifies next larger offset.
  // and only then by subtracting those two offsets we get the slice size.

  // Sorting by offset both orders the strips and lets us detect
  // duplicate offsets (which would imply a zero-sized strip).
  std::sort(offsets.begin(), offsets.end(),
            [](const IiqOffset& a, const IiqOffset& b) {
              if (a.offset == b.offset && &a != &b)
                ThrowRDE("Two identical offsets found. Corrupt raw.");
              return a.offset < b.offset;
            });

  std::vector<PhaseOneStrip> slices;
  slices.reserve(height);

  auto offset_iterator = std::begin(offsets);
  // Skip any padding before the first (smallest-offset) strip.
  bs.skipBytes(offset_iterator->offset);

  auto next_offset_iterator = std::next(offset_iterator);
  while (next_offset_iterator < std::end(offsets)) {
    assert(next_offset_iterator->offset > offset_iterator->offset);
    // Strip size is the distance to the next-larger offset.
    const auto size = next_offset_iterator->offset - offset_iterator->offset;
    assert(size > 0);

    // Keep the original row number `n`: sorting may have reordered rows.
    slices.emplace_back(offset_iterator->n, bs.getStream(size));

    std::advance(offset_iterator, 1);
    std::advance(next_offset_iterator, 1);
  }

  assert(slices.size() == height);

  return slices;
}
117
118namespace {
119
129
// Maps the on-disk RawFormat tag value to the IIQFormat enum, or
// std::nullopt for unknown codes.
// NOTE(review): the IIQFormat enum definition and the signature line
// (`Optional<IIQFormat> getAsIIQFormat(uint32_t v)` per the reference
// listing) are missing from this extraction.
  switch (v) {
    using enum IIQFormat;
  case 1:
    return RAW_1;
  case 2:
    return RAW_2;
  case 3:
    return IIQ_L;
  case 5:
    return IIQ_S;
  case 6:
    return IIQ_Sv2;
  case 8:
    return IIQ_L16;
  default:
    // Unknown format code; the caller treats this as unsupported.
    return std::nullopt;
  }
}
149
150} // namespace
151
// Selects which flat-field calibration PhaseOneFlatField() performs:
// LUMA uses 2 interleaved gradient channels, CHROMA uses 4 (see the
// `nc` switch inside PhaseOneFlatField).
enum class IiqDecoder::IiqCorr : uint8_t { LUMA, CHROMA };
153
// NOTE(review): the function signature line (per the class reference,
// `RawImage IiqDecoder::decodeRawInternal()`) and the line declaring
// `db` (presumably a little-endian DataBuffer over `buf`) are missing
// from this extraction — confirm against the repository.
  const Buffer buf(mFile.getSubView(8));
  ByteStream bs(db);

  bs.skipBytes(4); // Phase One magic
  bs.skipBytes(4); // padding?

  // Remember where the directory pointer lives; entry payload offsets
  // are resolved relative to this position.
  const auto origPos = bs.getPosition();

  const uint32_t entries_offset = bs.getU32();

  bs.setPosition(entries_offset);

  const uint32_t entries_count = bs.getU32();
  bs.skipBytes(4); // ???

  // this is how much is to be read for all the entries
  ByteStream es(bs.getStream(entries_count, 16));

  bs.setPosition(origPos);

  uint32_t width = 0;
  uint32_t height = 0;
  uint32_t split_row = 0;
  uint32_t split_col = 0;

  Optional<IIQFormat> format;
  Optional<Buffer> raw_data;
  ByteStream block_offsets;
  ByteStream wb;
  ByteStream correction_meta_data;

  // Walk the 16-byte directory entries; each is (tag, type, len, data).
  for (uint32_t entry = 0; entry < entries_count; entry++) {
    const uint32_t tag = es.getU32();
    es.skipBytes(4); // type
    const uint32_t len = es.getU32();
    const uint32_t data = es.getU32();

    switch (tag) {
    case 0x107: // White-balance coefficients (floats, read at the end).
      wb = bs.getSubStream(data, len);
      break;
    case 0x108:
      width = data;
      break;
    case 0x109:
      height = data;
      break;
    case 0x10e: // RawFormat
      if (format)
        ThrowRDE("Duplicate RawFormat tag.");
      format = getAsIIQFormat(data);
      // Only the IIQ_L flavour (on-disk code 3) is supported here.
      if (!format || *format != IIQFormat::IIQ_L)
        ThrowRDE("Unsupported RawFormat: %u", data);
      break;
    case 0x10f: // The compressed raw payload itself.
      raw_data = bs.getSubView(data, len);
      break;
    case 0x110: // Sensor correction metadata blob (see CorrectPhaseOneC).
      correction_meta_data = bs.getSubStream(data);
      break;
    case 0x21c: // Per-row strip offsets into the raw payload.
      // they are not guaranteed to be sequential!
      block_offsets = bs.getSubStream(data, len);
      break;
    case 0x21d:
      // 16-bit black level adapted to 14-bit raw data (IIQFormat::IIQ_L)
      black_level = data >> 2;
      break;
    case 0x222: // Column where the sensor halves meet.
      split_col = data;
      break;
    case 0x224: // Row where the sensor halves meet.
      split_row = data;
      break;
    default:
      // FIXME: is there a "block_sizes" entry?
      break;
    }
  }

  // FIXME: could be wrong. max "active pixels" in "Sensor+" mode - "101 MP"
  if (width == 0 || height == 0 || width > 11976 || height > 8854)
    ThrowRDE("Unexpected image dimensions found: (%u; %u)", width, height);

  if (!format)
    ThrowRDE("Unspecified RawFormat");

  if (!raw_data)
    ThrowRDE("No raw data found");

  if (split_col > width || split_row > height)
    ThrowRDE("Invalid sensor quadrant split values (%u, %u)", split_row,
             split_col);

  // Exactly one 32-bit offset per row must be present.
  block_offsets = block_offsets.getStream(height, sizeof(uint32_t));

  std::vector<IiqOffset> offsets;
  offsets.reserve(1 + height);

  for (uint32_t row = 0; row < height; row++)
    offsets.emplace_back(row, block_offsets.getU32());

  // to simplify slice size calculation, we insert a dummy offset,
  // which will be used much like end()
  offsets.emplace_back(height, raw_data->getSize());

  std::vector<PhaseOneStrip> strips(
      computeSripes(*raw_data, std::move(offsets), height));

  mRaw->dim = iPoint2D(width, height);

  PhaseOneDecompressor p(mRaw, std::move(strips));
  mRaw->createData();
  p.decompress();

  // Apply the calibration corrections only when a correction blob was
  // present AND the `iiq` hints (presumably populated from camera
  // metadata — verify) request it.
  if (correction_meta_data.getSize() != 0 && iiq)
    CorrectPhaseOneC(correction_meta_data, split_row, split_col);

  // Only 3 WB coefficients are stored; the 4th stays zero-initialized.
  std::array<float, 4> wbCoeffs = {};
  for (int i = 0; i < 3; i++)
    wbCoeffs[i] = wb.getFloat();
  mRaw->metadata.wbCoeffs = wbCoeffs;

  return mRaw;
}
281
// Walks the correction-metadata directory (same (count, entries) layout
// as the main IIQ directory, but with 12-byte entries) and dispatches
// each recognized correction to its handler.
// NOTE(review): the first signature line
// (`void IiqDecoder::CorrectPhaseOneC(ByteStream meta_data, uint32_t split_row,`)
// is missing from this extraction — confirm against the repository.
                                  uint32_t split_col) const {
  meta_data.skipBytes(8);
  const uint32_t bytes_to_entries = meta_data.getU32();
  meta_data.setPosition(bytes_to_entries);
  const uint32_t entries_count = meta_data.getU32();
  meta_data.skipBytes(4);

  // this is how much is to be read for all the entries
  ByteStream entries(meta_data.getStream(entries_count, 12));
  meta_data.setPosition(0);

  // Each of these corrections may legally appear at most once.
  bool QuadrantMultipliersSeen = false;
  bool SensorDefectsSeen = false;

  for (uint32_t entry = 0; entry < entries_count; entry++) {
    const uint32_t tag = entries.getU32();
    const uint32_t len = entries.getU32();
    const uint32_t offset = entries.getU32();

    switch (tag) {
    case 0x400: // Sensor Defects
      if (SensorDefectsSeen)
        ThrowRDE("Second sensor defects entry seen. Unexpected.");
      correctSensorDefects(meta_data.getSubStream(offset, len));
      SensorDefectsSeen = true;
      break;
    case 0x40b: // Chroma calibration
      PhaseOneFlatField(meta_data.getSubStream(offset, len), IiqCorr::CHROMA);
      break;
    case 0x410: // Luma calibration
      PhaseOneFlatField(meta_data.getSubStream(offset, len), IiqCorr::LUMA);
      break;
    case 0x431:
      if (QuadrantMultipliersSeen)
        ThrowRDE("Second quadrant multipliers entry seen. Unexpected.");
      if (iiq.quadrantMultipliers)
        // NOTE(review): the call line (presumably
        // `CorrectQuadrantMultipliersCombined(meta_data.getSubStream(offset, len),`)
        // is missing from this extraction — confirm against the repository.
                                            split_row, split_col);
      QuadrantMultipliersSeen = true;
      break;
    default:
      break;
    }
  }
}
328
// This method defines a correction that compensates for the fact that
// IIQ files may come from a camera with multiple (four, in this case)
// sensors combined into a single "sensor." Because the different
// sensors may have slightly different responses, we need to multiply
// the pixels in each by a correction factor to ensure that they blend
// together smoothly. The correction factor is not a single
// multiplier, but a curve defined by seven control points. Each
// curve's control points share the same seven X-coordinates.
// NOTE(review): the signature line (presumably
// `void IiqDecoder::CorrectQuadrantMultipliersCombined(ByteStream data,`)
// is missing from this extraction — confirm against the repository.
                                                 uint32_t split_row,
                                                 uint32_t split_col) const {
  std::array<uint32_t, 9> shared_x_coords;

  // Read the middle seven points from the file
  std::generate_n(std::next(shared_x_coords.begin()), 7,
                  [&data] { return data.getU32(); });

  // All the curves include (0, 0) and (65535, 65535),
  // so the first and last points are predefined
  shared_x_coords.front() = 0;
  shared_x_coords.back() = 65535;

  // Check that the middle coordinates make sense.
  if (std::adjacent_find(shared_x_coords.cbegin(), shared_x_coords.cend(),
                         std::greater_equal<>()) != shared_x_coords.cend())
    ThrowRDE("The X coordinates must all be strictly increasing");

  // One control-point set per sensor quadrant: [row-half][col-half].
  std::array<std::array<std::vector<iPoint2D>, 2>, 2> control_points;
  for (auto& quadRow : control_points) {
    for (auto& quadrant : quadRow) {
      quadrant.reserve(9);
      quadrant.emplace_back(0, 0);

      for (int i = 1; i < 8; i++) {
        // These multipliers are expressed in ten-thousandths in the
        // file
        const uint64_t y_coord =
            (uint64_t(data.getU32()) * shared_x_coords[i]) / 10000ULL;
        if (y_coord > 65535)
          ThrowRDE("The Y coordinate %" PRIu64 " is too large", y_coord);
        quadrant.emplace_back(shared_x_coords[i], y_coord);
      }

      quadrant.emplace_back(65535, 65535);
      assert(quadrant.size() == 9);
    }
  }

  for (int quadRow = 0; quadRow < 2; quadRow++) {
    for (int quadCol = 0; quadCol < 2; quadCol++) {
      const Array2DRef<uint16_t> img(mRaw->getU16DataAsUncroppedArray2DRef());

      // Interpolate the 9 control points into a full 16-bit lookup curve.
      const Spline<> s(control_points[quadRow][quadCol]);
      const std::vector<uint16_t> curve = s.calculateCurve();

      // Quadrant bounds: halves split at (split_row, split_col).
      int row_start = quadRow == 0 ? 0 : split_row;
      int row_end = quadRow == 0 ? split_row : img.height();
      int col_start = quadCol == 0 ? 0 : split_col;
      int col_end = quadCol == 0 ? split_col : img.width();

      for (int row = row_start; row < row_end; row++) {
        for (int col = col_start; col < col_end; col++) {
          uint16_t& pixel = img(row, col);
          // This adjustment is expected to be made with the
          // black-level already subtracted from the pixel values.
          // Because this is kept as metadata and not subtracted at
          // this point, to make the correction work we subtract the
          // appropriate amount before indexing into the curve and
          // then add it back so that subtracting the black level
          // later will work as expected
          const uint16_t diff = pixel < black_level
                                    ? pixel
          // NOTE(review): the `: black_level;` alternative of this
          // ternary is missing from this extraction — confirm against
          // the repository.
          pixel = curve[pixel - diff] + diff;
        }
      }
    }
  }
}
408
// Luma and chroma calibration to eliminate remaining paneling artefacts,
// needed in addition to CorrectQuadrantMultipliersCombined().
// -- Based on phase_one_flat_field() in dcraw.c by Dave Coffin
// NOTE(review): the signature line (per the class reference,
// `void IiqDecoder::PhaseOneFlatField(ByteStream data, IiqCorr corr) const`)
// is missing from this extraction — confirm against the repository.
  const Array2DRef<uint16_t> img(mRaw->getU16DataAsUncroppedArray2DRef());

  // Number of interleaved correction channels: 2 for luma, 4 for chroma.
  int nc = [corr]() {
    switch (corr) {
    case IiqCorr::LUMA:
      return 2;
    case IiqCorr::CHROMA:
      return 4;
    }
    ThrowRDE("Unsupported IIQ correction");
  }();

  // head[] describes the correction grid; head[0..1] look like an
  // origin, head[2..3] a size, head[4..5] the x/y step — presumably,
  // mirroring dcraw's phase_one_flat_field; verify against dcraw.
  std::array<uint16_t, 8> head;
  for (int i = 0; i < 8; i++)
    head[i] = data.getU16();

  // Degenerate grid: nothing to correct.
  if (head[2] == 0 || head[3] == 0 || head[4] == 0 || head[5] == 0)
    return;

  // Grid dimensions in cells (rounded up).
  auto wide = implicit_cast<int>(roundUpDivisionSafe(head[2], head[4]));
  auto high = implicit_cast<int>(roundUpDivisionSafe(head[3], head[5]));

  std::vector<float> mrow_storage;
  // NOTE(review): the line that creates `mrow` over `mrow_storage`
  // (presumably `auto mrow = Array2DRef<float>::create(`) is missing
  // from this extraction — confirm against the repository.
      mrow_storage, /*width=*/wide * nc, /*height=*/1);
  mrow = Array2DRef<float>(mrow_storage.data(), /*width=*/nc, /*height=*/wide);

  for (int y = 0; y < high; y++) {
    for (int x = 0; x < wide; x++) {
      // Even channel holds the current grid value; the odd channel holds
      // the per-row interpolation increment toward the next grid line.
      for (int c = 0; c < nc; c += 2) {
        float num = data.getU16() / 32768.0F;
        if (y == 0)
          mrow(x, c) = num;
        else
          mrow(x, c + 1) = (num - mrow(x, c)) / head[5];
      }
    }
    if (y == 0)
      continue;
    // Interpolate image rows lying between the previous and current
    // grid lines, clamped to the image and grid extents.
    for (int rend = head[1] + (y * head[5]), row = rend - head[5];
         row < mRaw->dim.y && row < rend && row < (head[1] + head[3] - head[5]);
         row++) {
      for (int x = 1; x < wide; x++) {
        std::array<float, 4> mult;
        for (int c = 0; c < nc; c += 2) {
          mult[c] = mrow(x - 1, c);
          mult[c + 1] = (mrow(x, c) - mult[c]) / head[4];
        }
        for (int cend = head[0] + (x * head[4]), col = cend - head[4];
             col < mRaw->dim.x && col < cend &&
             col < head[0] + head[2] - head[4];
             col++) {
          // For chroma (nc > 2) the channel is selected by the CFA color
          // at this pixel; odd channel codes are skipped by `!(c & 1)`.
          if (int c =
                  nc > 2 ? static_cast<unsigned>(mRaw->cfa.getColorAt(row, col))
                         : 0;
              !(c & 1)) {
            auto val = implicit_cast<unsigned>(img(row, col) * mult[c]);
            // Clamp the scaled pixel to the 16-bit range.
            img(row, col) = implicit_cast<uint16_t>(std::min(val, 0xFFFFU));
          }
          // Advance the per-column interpolation.
          for (int c = 0; c < nc; c += 2)
            mult[c] += mult[c + 1];
        }
      }
      // Advance the per-row interpolation.
      for (int x = 0; x < wide; x++)
        for (int c = 0; c < nc; c += 2)
          mrow(x, c) += mrow(x, c + 1);
    }
  }
}
482
// NOTE(review): the signature line (per the class reference,
// `void IiqDecoder::checkSupportInternal(const CameraMetaData* meta)`)
// is missing from this extraction.
  checkCameraSupported(meta, mRootIFD->getID(), "");

  auto id = mRootIFD->getID();
  const Camera* cam = meta->getCamera(id.make, id.model, mRaw->metadata.mode);
  if (!cam)
    ThrowRDE("Couldn't find camera %s %s", id.make.c_str(), id.model.c_str());

  // The CFA layout comes from the camera-metadata database, not the file.
  mRaw->cfa = cam->cfa;
}
493
// NOTE(review): the signature line (per the class reference,
// `void IiqDecoder::decodeMetaDataInternal(const CameraMetaData* meta)`)
// is missing from this extraction.
  setMetaData(meta, "", 0);

  // black_level was captured from tag 0x21d while decoding the raw;
  // propagate it so later black-level subtraction uses it.
  if (black_level)
    mRaw->blackLevel = black_level;
}
500
// NOTE(review): the signature line (per the class reference,
// `void IiqDecoder::correctSensorDefects(ByteStream data) const`)
// is missing from this extraction.
  // Each defect record is 8 bytes: col, row, type, unknown/unused.
  while (data.getRemainSize() != 0) {
    const uint16_t col = data.getU16();
    const uint16_t row = data.getU16();
    const uint16_t type = data.getU16();
    data.skipBytes(2); // Ignore unknown/unused bits.

    if (col >= mRaw->dim.x) // Value for col is outside the raw image.
      continue;
    switch (type) {
    case 131: // bad column
    case 137: // bad column
      correctBadColumn(col);
      break;
    case 129: // bad pixel
      handleBadPixel(col, row);
      break;
    default: // Oooh, a sensor defect not in dcraw!
      break;
    }
  }
}
523
524void IiqDecoder::handleBadPixel(const uint16_t col, const uint16_t row) const {
525 MutexLocker guard(&mRaw->mBadPixelMutex);
526 mRaw->mBadPixelPositions.insert(mRaw->mBadPixelPositions.end(),
527 (static_cast<uint32_t>(row) << 16) + col);
528}
529
// NOTE(review): the signature line (per the class reference,
// `void IiqDecoder::correctBadColumn(uint16_t col) const`) is missing
// from this extraction.
// NOTE(review): the body reads img(row, col - 2) etc., so it presumably
// relies on 2 <= col < dim.x - 2; the caller only checks col < dim.x —
// verify against the repository.
  const Array2DRef<uint16_t> img(mRaw->getU16DataAsUncroppedArray2DRef());

  for (int row = 2; row < mRaw->dim.y - 2; row++) {
    // NOTE(review): arguments here are (col, row) while PhaseOneFlatField
    // calls getColorAt(row, col) — confirm the expected (x, y) order of
    // ColorFilterArray::getColorAt.
    if (mRaw->cfa.getColorAt(col, row) == CFAColor::GREEN) {
      /* Do green pixels. Let's pretend we are in "G" pixel, in the middle:
       * G=G
       * BGB
       * G0G
       * We accumulate the values 4 "G" pixels form diagonals, then check which
       * of 4 values is most distant from the mean of those 4 values, subtract
       * it from the sum, average (divide by 3) and round to nearest int.
       */
      int max = 0;
      std::array<uint16_t, 4> val;
      std::array<int32_t, 4> dev;
      int32_t sum = 0;
      sum += val[0] = img(row - 1, col - 1);
      sum += val[1] = img(row + 1, col - 1);
      sum += val[2] = img(row - 1, col + 1);
      sum += val[3] = img(row + 1, col + 1);
      for (int i = 0; i < 4; i++) {
        // dev[i] == |4*val[i] - sum|, i.e. 4x the distance from the mean.
        dev[i] = std::abs((val[i] * 4) - sum);
        if (dev[max] < dev[i])
          max = i;
      }
      // Drop the largest outlier and average the remaining three.
      const int three_pixels = sum - val[max];
      // This is `std::lround(three_pixels / 3.0)`, but without FP.
      img(row, col) = implicit_cast<uint16_t>((three_pixels + 1) / 3);
    } else {
      /*
       * Do non-green pixels. Let's pretend we are in "R" pixel, in the middle:
       * RG=GR
       * GB=BG
       * RGRGR
       * GB0BG
       * RG0GR
       * We have 6 other "R" pixels - 2 by horizontal, 4 by diagonals.
       * We need to combine them, to get the value of the pixel we are in.
       */
      uint32_t diags = img(row + 2, col - 2) + img(row - 2, col - 2) +
                       img(row + 2, col + 2) + img(row - 2, col + 2);
      uint32_t horiz = img(row, col - 2) + img(row, col + 2);
      // But this is not just averaging, we bias towards the horizontal pixels.
      img(row, col) = implicit_cast<uint16_t>(
          std::lround((diags * 0.0732233) + (horiz * 0.3535534)));
    }
  }
}
579
580} // namespace rawspeed
#define s
#define ThrowRDE(...)
assert(dim.area() >=area)
dim y
Definition Common.cpp:51
dim x
Definition Common.cpp:50
bool checkCameraSupported(const CameraMetaData *meta, const TiffID &id, const std::string &mode)
void setMetaData(const CameraMetaData *meta, const TiffID &id, const std::string &mode, int iso_speed)
int RAWSPEED_READONLY height() const
static Array2DRef< T > create(std::vector< cvless_value_type, AllocatorType > &storage, int width, int height)
Definition Array2DRef.h:98
int RAWSPEED_READONLY width() const
Buffer getSubView(size_type offset, size_type size_) const
Definition Buffer.h:78
size_type RAWSPEED_READONLY getSize() const
Definition Buffer.h:115
size_type RAWSPEED_READONLY getRemainSize() const
Definition ByteStream.h:87
void setPosition(size_type newPos)
Definition ByteStream.h:83
ByteStream getSubStream(size_type offset, size_type size_) const
Definition ByteStream.h:54
ByteStream getStream(size_type size_)
Definition ByteStream.h:119
void skipBytes(size_type nbytes)
Definition ByteStream.h:130
size_type getPosition() const
Definition ByteStream.h:78
ColorFilterArray cfa
Definition Camera.h:103
const Camera * getCamera(const std::string &make, const std::string &model, const std::string &mode) const
T get(size_type offset, size_type index=0) const
Definition Buffer.h:147
RawImage decodeRawInternal() override
void handleBadPixel(uint16_t col, uint16_t row) const
void decodeMetaDataInternal(const CameraMetaData *meta) override
static bool isAppropriateDecoder(Buffer file)
void checkSupportInternal(const CameraMetaData *meta) override
static std::vector< PhaseOneStrip > computeSripes(Buffer raw_data, std::vector< IiqOffset > offsets, uint32_t height)
void correctSensorDefects(ByteStream data) const
void PhaseOneFlatField(ByteStream data, IiqCorr corr) const
void CorrectQuadrantMultipliersCombined(ByteStream data, uint32_t split_row, uint32_t split_col) const
void correctBadColumn(uint16_t col) const
void CorrectPhaseOneC(ByteStream meta_data, uint32_t split_row, uint32_t split_col) const
struct rawspeed::RawDecoder::@224365236145247220354374363215210370336104332222 iiq
TiffID getID() const
Definition TiffIFD.cpp:325
Optional< IIQFormat > getAsIIQFormat(uint32_t v)
constexpr uint64_t RAWSPEED_READNONE roundUpDivisionSafe(uint64_t value, uint64_t div)
Definition Common.h:145
constexpr RAWSPEED_READNONE Ttgt implicit_cast(Tsrc value)
Definition Casts.h:32
Array2DRef(Array1DRef< T > data, int width, int height, int pitch) -> Array2DRef< T >
void RAWSPEED_UNLIKELY_FUNCTION RAWSPEED_NOINLINE static char buf[bufSize]
std::string make
Definition TiffIFD.h:134