author    Alon Levy <alevy@redhat.com>    2014-06-01 10:25:00 +0300
committer Alon Levy <alevy@redhat.com>    2014-06-01 10:25:00 +0300
commit    71ef985b297d3feb61ad0ef599135db661dd43b3 (patch)
tree      6ff06b6f2532541cf44c2509cdf9f152417aae73
parent    8ec51bb90779a0692853d5190e7feba9cd8bfb20 (diff)
wip: rust implementation of glz encoder (glz)
-rw-r--r--    common/glz_encoder.rs                1327
-rw-r--r--    common/rust_translation_notes.txt      56
2 files changed, 1383 insertions, 0 deletions
diff --git a/common/glz_encoder.rs b/common/glz_encoder.rs
new file mode 100644
index 0000000..5583bac
--- /dev/null
+++ b/common/glz_encoder.rs
@@ -0,0 +1,1327 @@
+#![feature(macro_rules)]
+#![allow(non_camel_case_types)]
+
+extern crate libc;
+use libc::{uint8_t, uint16_t, uint32_t, uint64_t, size_t, c_char, c_int, c_uint, c_void};
+use std::mem::{transmute, size_of};
+use std::ptr;
+use std::raw::Slice;
+
+/* includes */
+
+/* dictionary */
+
+//#define CHAINED_HASH
+
+#[cfg(chained_hash)]
+static HASH_SIZE_LOG: int = 16;
+
+#[cfg(chained_hash)]
+static HASH_CHAIN_SIZE: int = 4;
+
+#[cfg(not(chained_hash))]
+static HASH_SIZE_LOG: int = 20;
+
+#[cfg(not(chained_hash))]
+static HASH_CHAIN_SIZE: int = 1;
+
+static HASH_SIZE: int = 1 << HASH_SIZE_LOG;
+static HASH_MASK: int = HASH_SIZE - 1;
+
+/* Interface for using the dictionary for encoding.
+ Data structures are exposed for the encoder for efficiency
+ purposes. */
+struct WindowImage {
+ id: uint64_t id,
+ type_: LzImageType,
+ size: c_int, // in pixels
+ first_seg: uint32_t first_seg;
+ usr_context: *GlzUsrImageContext,
+ next: *WindowImage, // TODO: Box
+ is_alive: uint8_t,
+}
+
+static MAX_IMAGE_SEGS_NUM: uint32_t = 0xffffffff;
+static NULL_IMAGE_SEG_ID: uint32_t = MAX_IMAGE_SEGS_NUM;
+static INIT_IMAGE_SEGS_NUM: int = 1000;
+
+/* Images can be separated into several chunks. The basic unit of the
+ dictionary window is one image segment. Each segment is encoded separately.
+ An encoded match can refer to only one segment.*/
+struct WindowImageSegment {
+ image: *WindowImage,
+ lines: *uint8_t,
+ lines_end: *uint8_t,
+ pixels_num: uint32_t, // Number of pixels in the segment
+ pixels_so_far: uint64_t, // Total no. pixels passed through the window till this segment.
+ // NOTE - never use size delta independently. It should
+ // always be used with respect to a previous size delta
+ next: uint32_t,
+}
+
+
+#[packed]
+struct HashEntry {
+ image_seg_idx: uint32_t,
+ ref_pix_idx: uint32_t,
+}
+
+#[cfg(chained_hash)]
+struct SharedDictionaryHash {
+    htab: [[HashEntry, ..HASH_CHAIN_SIZE], ..HASH_SIZE],
+    htab_counter: [uint8_t, ..HASH_SIZE], // cyclic counter for the next entry in a chain to be assigned
+}
+
+#[cfg(not(chained_hash))]
+struct SharedDictionaryHash {
+    htab: [HashEntry, ..HASH_SIZE],
+}
+
+// TODO: open rust issue on nested structs. don't see why not to have this.
+struct SharedDictionaryWindow {
+ /* The segments storage. A dynamic array.
+ By referring to a segment by its index, instead of address,
+ we save space in the hash entries (32bit instead of 64bit) */
+    segs: *WindowImageSegment,
+ segs_quota: uint32_t,
+
+ /* The window is manged as a linked list rather than as a cyclic
+ array in order to keep the indices of the segments consistent
+ after reallocation */
+
+ /* the window in a resolution of image segments */
+ used_segs_head: uint32_t, // the latest head
+ used_segs_tail: uint32_t,
+ free_segs_head: uint32_t,
+
+ encoders_heads: *uint32_t, // Holds for each encoder (by id), the window head when
+ // it started the encoding.
+ // The head is NULL_IMAGE_SEG_ID when the encoder is
+ // not encoding.
+
+ /* the window in a resolution of images. But here the head contains the oldest head*/
+ used_images_tail: *WindowImage,
+ used_images_head: *WindowImage,
+ free_images: *WindowImage,
+
+ pixels_so_far: uint64_t,
+ size_limit: uint32_t, // max number of pixels in a window (per encoder)
+}
+
+struct SharedDictionary {
+ window: SharedDictionaryWindow,
+ /* Concurrency issues: the reading/writing of each entry field should be atomic.
+ It is allowed that the reading/writing of the whole entry won't be atomic,
+ since before we access a reference we check its validity*/
+
+ // NOTE: due to the way cfg works in rust we use a substruct. an alternative would have been a macro.
+    hash: SharedDictionaryHash,
+
+ last_image_id: uint64_t,
+ max_encoders: uint32_t,
+ lock: pthread_mutex_t,
+ rw_alloc_lock: pthread_rwlock_t,
+    cur_usr: *GlzEncoderUsrContext, // each encoder has its own usr context
+}
+
+/*
+ Add the image to the tail of the window.
+ If possible, release images from the head of the window.
+ Also perform concurrency related operations.
+
+ usr_image_context: when an image is released from the window due to capacity overflow,
+ usr_image_context is given as a parameter to the free_image callback.
+
+ image_head_dist : the number of images between the current image and the head of the
+ window that is associated with the encoder.
+*/
+fn glz_dictionary_pre_encode(encoder_id: uint32_t, usr: *GlzEncoderUsrContext,
+ dict: *SharedDictionary, image_type: LzImageType,
+ image_width: c_int, image_height: c_int, image_stride: c_int,
+ first_lines: *uint8_t, num_first_lines: c_uint,
+ usr_image_context: *GlzUsrImageContext,
+                             image_head_dist: *uint32_t) -> *WindowImage;
+
+/*
+ Performs concurrency related operations.
+ If possible, release images from the head of the window.
+*/
+fn glz_dictionary_post_encode(encoder_id: uint32_t, usr: *GlzEncoderUsrContext,
+ dict: *SharedDictionary);
+
+/* rust: only required in glz_encoder_dictionary.c
+#define IMAGE_SEG_IS_EARLIER(dict, dst_seg, src_seg) ( \
+ ((src_seg) == NULL_IMAGE_SEG_ID) || (((dst_seg) != NULL_IMAGE_SEG_ID) \
+ && ((dict)->window.segs[(dst_seg)].pixels_so_far < \
+ (dict)->window.segs[(src_seg)].pixels_so_far)))
+*/
+
+#[cfg(chained_hash)]
+fn update_hash(dict: &mut SharedDictionary, hval: uint, seg: uint32_t, pix: uint32_t) {
+    let tmp_count = dict.hash.htab_counter[hval];
+    dict.hash.htab[hval][tmp_count as uint].image_seg_idx = seg;
+    dict.hash.htab[hval][tmp_count as uint].ref_pix_idx = pix;
+    dict.hash.htab_counter[hval] = (tmp_count + 1) & (HASH_CHAIN_SIZE - 1) as uint8_t;
+}
+#[cfg(not(chained_hash))]
+fn update_hash(dict: &mut SharedDictionary, hval: uint, seg: uint32_t, pix: uint32_t) {
+    dict.hash.htab[hval].image_seg_idx = seg;
+    dict.hash.htab[hval].ref_pix_idx = pix;
+}
+
+/* checks if the reference segment is located in the range of the window
+ of the current encoder */
+fn ref_seg_is_valid(dict: &SharedDictionary, enc_id: u8, ref_seg: &WindowImageSegment,
+                    src_seg: &WindowImageSegment) -> bool
+{
+    // TODO: image/segs/encoders_heads are raw pointers, so this still needs unsafe derefs
+    (ref_seg as *WindowImageSegment == src_seg as *WindowImageSegment) ||
+    (ref_seg.image.is_not_null() &&
+     ref_seg.image.is_alive &&
+     (src_seg.image.type_ == ref_seg.image.type_) &&
+     (ref_seg.pixels_so_far <= src_seg.pixels_so_far) &&
+     (dict.window.segs[
+         dict.window.encoders_heads[enc_id]].pixels_so_far <=
+         ref_seg.pixels_so_far))
+}
+
+struct EncoderCurrentImage {
+    _type: LzImageType,
+    id: uint64_t,
+    first_win_seg: uint32_t,
+}
+
+struct EncoderIO {
+ start: *uint8_t,
+ now: *uint8_t,
+ end: *uint8_t,
+ bytes_count: size_t,
+ last_copy: *uint8_t // pointer to the last byte in which copy count was written
+}
+
+/* Holds a specific data for one encoder, and data that is relevant for the current image encoded */
+struct Encoder {
+ usr: *GlzEncoderUsrContext,
+ id: u8,
+ dict: *SharedDictionary,
+
+ cur_image: EncoderCurrentImage,
+ io: EncoderIO,
+}
+
+/* encoder */
+
+struct GlzEncoderUsrContext {
+ error: extern "C" fn(usr: *GlzEncoderUsrContext, fmt: *c_char, ...),
+ warn: extern "C" fn(usr: *GlzEncoderUsrContext, fmt: *c_char, ...),
+ info: extern "C" fn(usr: *GlzEncoderUsrContext, fmt: *c_char, ...),
+    malloc: extern "C" fn(usr: *GlzEncoderUsrContext, size: c_int) -> *c_void,
+    free: extern "C" fn(usr: *GlzEncoderUsrContext, ptr: *c_void),
+
+ // get the next chunk of the image which is entered to the dictionary. If the image is down to
+ // top, return it from the last line to the first one (stride should always be positive)
+ more_lines: extern "C" fn(usr: *GlzEncoderUsrContext, lines: **uint8_t) -> c_int,
+
+ // get the next chunk of the compressed buffer.return number of bytes in the chunk.
+ more_space: extern "C" fn(usr: *GlzEncoderUsrContext, io_ptr: **uint8_t) -> c_int,
+
+ // called when an image is removed from the dictionary, due to the window size limit
+ free_image: extern "C" fn(usr: *GlzEncoderUsrContext, image: *GlzUsrImageContext),
+}
+
+type BYTE = uint8_t;
+
+#[packed]
+struct one_byte_pixel_t {
+ a: BYTE
+}
+
+#[packed]
+struct rgb32_pixel_t {
+    b: BYTE,
+    g: BYTE,
+    r: BYTE,
+    pad: BYTE,
+}
+
+#[packed]
+struct rgb24_pixel_t {
+    b: BYTE,
+    g: BYTE,
+    r: BYTE,
+}
+
+type rgb16_pixel_t = uint16_t;
+
+
+/* glz_encoder_tmpl.c */
+
+// HASH_SIZE/HASH_MASK are already defined above from HASH_SIZE_LOG; redefining
+// the 13-bit variants here would conflict, so they are left commented out.
+//static HASH_LOG: int = 13;
+//static HASH_SIZE: int = (1 << HASH_LOG);
+//static HASH_MASK: int = (HASH_SIZE - 1);
+
+static DJB2_START: int = 5381;
+// TODO: should this be a macro? should produce the same result using a function.
+/*
+macro_rules! DJB2_HASH(
+ ($hash: ident, $c: ident) => ($hash = (($hash << 5) + $hash) ^ ($c)) //|{hash = ((hash << 5) + hash) + c;}
+)
+*/
+#[inline]
+fn djb2_hash(hash: int, c: BYTE) -> int {
+    ((hash << 5) + hash) ^ (c as int) //|{hash = ((hash << 5) + hash) + c;}
+}
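+// e.g. djb2_hash(DJB2_START, 0x12) == ((5381 << 5) + 5381) ^ 0x12 == 177591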
+
+/*
+ For each pixel type the following macros are defined:
+ PIXEL : input type
+ FNAME(name)
+ ENCODE_PIXEL(encoder, pixel) : writing a pixel to the compressed buffer (byte by byte)
+ SAME_PIXEL(pix1, pix2) : comparing two pixels
+ HASH_FUNC(value, pix_ptr) : hash func of 3 consecutive pixels
+*/
+
+// This is required for the trait specialization where we read the alpha out of a rgb32_pixel_t array
+struct RgbAlpha(rgb32_pixel_t);
+
+trait Glz {
+ fn encode_pixel(&self, e: &Encoder);
+ fn same_pixel(&self, pix2: &Self) -> bool;
+ fn min_ref_encode_size(&self) -> int;
+ fn max_ref_encode_size(&self) -> int;
+ // TODO: should this be by ref? #[inline=always]? make sure equivalent code (benchmark)
+}
+
+trait GlzMatch {
+ fn same_pixel_run(&mut self, ref_: &mut Self) -> int;
+}
+
+trait GlzGetters {
+ fn get_r(&self) -> BYTE;
+ fn get_g(&self) -> BYTE;
+ fn get_b(&self) -> BYTE;
+}
+
+// Separate because we will be defining it over pointers, not raw pixels
+trait GlzHash {
+ fn hash_func(&self) -> int;
+}
+
+impl Glz for one_byte_pixel_t {
+//#define FNAME(name) glz_plt_##name
+ fn encode_pixel(&self, e: &Encoder) {
+ // gets the pixel and write only the needed bytes from the pixel
+ encode(e, self.a);
+ }
+    fn same_pixel(&self, pix2: &one_byte_pixel_t) -> bool {
+        self.a == pix2.a
+    }
+    fn min_ref_encode_size(&self) -> int { 4 }
+    fn max_ref_encode_size(&self) -> int { 7 }
+}
+
+impl GlzHash for *one_byte_pixel_t {
+    fn hash_func(&self) -> int {
+        let p: &[one_byte_pixel_t] = unsafe { transmute(Slice { data: *self, len: 3 }) };
+        let mut ret = DJB2_START;
+        ret = djb2_hash(ret, p[0].a);
+        ret = djb2_hash(ret, p[1].a);
+        ret = djb2_hash(ret, p[2].a);
+        ret & HASH_MASK
+    }
+}
+
+impl GlzMatch for *one_byte_pixel_t {
+    // unrolled in C; PLT/alpha need MIN_REF_ENCODE_SIZE (4) equal pixels to even consider a match
+    fn same_pixel_run(&mut self, ref_: &mut *one_byte_pixel_t) -> int {
+        unsafe {
+            let mut tmp_ip = *self;
+            let mut tmp_ref = *ref_;
+            let mut len = 0;
+            while len < 4 {
+                if !(*tmp_ref).same_pixel(&*tmp_ip) {
+                    return 0;
+                }
+                tmp_ref = tmp_ref.offset(1);
+                tmp_ip = tmp_ip.offset(1);
+                len += 1;
+            }
+            *self = tmp_ip;
+            *ref_ = tmp_ref;
+            len
+        }
+    }
+}
+
+impl GlzMatch for *rgb16_pixel_t {
+    // unrolled in C; RGB16 needs MIN_REF_ENCODE_SIZE (2) equal pixels - the extra
+    // LZ_PLT/LZ_RGB_ALPHA checks from the C template do not apply here
+    fn same_pixel_run(&mut self, ref_: &mut *rgb16_pixel_t) -> int {
+        unsafe {
+            let mut tmp_ip = *self;
+            let mut tmp_ref = *ref_;
+            let mut len = 0;
+            while len < 2 {
+                if !(*tmp_ref).same_pixel(&*tmp_ip) {
+                    return 0;
+                }
+                tmp_ref = tmp_ref.offset(1);
+                tmp_ip = tmp_ip.offset(1);
+                len += 1;
+            }
+            *self = tmp_ip;
+            *ref_ = tmp_ref;
+            len
+        }
+    }
+}
+
+impl Glz for RgbAlpha {
+//#define FNAME(name) glz_rgb_alpha_##name
+    fn encode_pixel(&self, e: &Encoder) {
+        let &RgbAlpha(ref p) = self;
+        encode(e, p.pad);
+    }
+    fn same_pixel(&self, pix2: &RgbAlpha) -> bool {
+        let &RgbAlpha(ref me) = self;
+        let &RgbAlpha(ref other) = pix2;
+        other.pad == me.pad
+    }
+    fn min_ref_encode_size(&self) -> int { 4 }
+    fn max_ref_encode_size(&self) -> int { 7 }
+}
+impl GlzHash for *RgbAlpha {
+    fn hash_func(&self) -> int {
+        let p: &[rgb32_pixel_t] = unsafe {
+            transmute(Slice { data: *self as *rgb32_pixel_t, len: 3 })
+        };
+        let mut ret = DJB2_START;
+        ret = djb2_hash(ret, p[0].pad);
+        ret = djb2_hash(ret, p[1].pad);
+        ret = djb2_hash(ret, p[2].pad);
+        ret & HASH_MASK
+    }
+}
+
+fn same_pixel_rgb<T: GlzGetters>(p1: &T, p2: &T) -> bool {
+ p1.get_r() == p2.get_r() && p1.get_g() == p2.get_g() && p1.get_b() == p2.get_b()
+}
+
+impl GlzGetters for rgb16_pixel_t {
+    fn get_r(&self) -> BYTE { ((*self >> 10) & 0x1f) as BYTE }
+    fn get_g(&self) -> BYTE { ((*self >> 5) & 0x1f) as BYTE }
+    fn get_b(&self) -> BYTE { (*self & 0x1f) as BYTE }
+}
+
+impl Glz for rgb16_pixel_t {
+//#define FNAME(name) glz_rgb16_##name
+ fn encode_pixel(&self, e: &Encoder) {
+        encode(e, (*self >> 8) as uint8_t);
+        encode(e, (*self & 0xff) as uint8_t);
+ }
+ fn same_pixel(&self, pix2: &rgb16_pixel_t) -> bool {
+ same_pixel_rgb(self, pix2)
+ }
+ fn min_ref_encode_size(&self) -> int { 2 }
+ fn max_ref_encode_size(&self) -> int { 3 }
+}
+
+impl GlzHash for *rgb16_pixel_t {
+    fn hash_func(&self) -> int {
+        let p: &[rgb16_pixel_t] = unsafe { transmute(Slice { data: *self, len: 3 }) };
+        let mut v = DJB2_START;
+        v = djb2_hash(v, (p[0] & 0x00ff) as BYTE);
+        v = djb2_hash(v, ((p[0] >> 8) & 0x007f) as BYTE);
+        v = djb2_hash(v, (p[1] & 0x00ff) as BYTE);
+        v = djb2_hash(v, ((p[1] >> 8) & 0x007f) as BYTE);
+        v = djb2_hash(v, (p[2] & 0x00ff) as BYTE);
+        v = djb2_hash(v, ((p[2] >> 8) & 0x007f) as BYTE);
+        v & HASH_MASK
+    }
+}
+
+fn hash_rgb<T: GlzGetters>(t: *T) -> int {
+    let mut v = DJB2_START;
+    let p: &[T] = unsafe { transmute(Slice { data: t, len: 3 }) };
+ v = djb2_hash(v, p[0].get_r());
+ v = djb2_hash(v, p[0].get_g());
+ v = djb2_hash(v, p[0].get_b());
+ v = djb2_hash(v, p[1].get_r());
+ v = djb2_hash(v, p[1].get_g());
+ v = djb2_hash(v, p[1].get_b());
+ v = djb2_hash(v, p[2].get_r());
+ v = djb2_hash(v, p[2].get_g());
+ v = djb2_hash(v, p[2].get_b());
+ v & HASH_MASK
+}
+
+impl Glz for rgb24_pixel_t {
+//#define FNAME(name) glz_rgb24_##name
+ fn encode_pixel(&self, e: &Encoder) {
+ encode(e, self.b);
+ encode(e, self.g);
+ encode(e, self.r);
+ }
+ fn same_pixel(&self, pix2: &rgb24_pixel_t) -> bool {
+ same_pixel_rgb(self, pix2)
+ }
+ fn min_ref_encode_size(&self) -> int { 2 }
+ fn max_ref_encode_size(&self) -> int { 2 }
+}
+
+impl GlzGetters for rgb24_pixel_t {
+ fn get_r(&self) -> BYTE { ((self).r) }
+ fn get_g(&self) -> BYTE { ((self).g) }
+ fn get_b(&self) -> BYTE { ((self).b) }
+}
+
+impl GlzHash for *rgb24_pixel_t {
+ fn hash_func(&self) -> int {
+        hash_rgb(*self)
+ }
+}
+
+impl GlzHash for *rgb32_pixel_t {
+ fn hash_func(&self) -> int {
+        hash_rgb(*self)
+ }
+}
+
+impl Glz for rgb32_pixel_t {
+//#define FNAME(name) glz_rgb32_##name
+ fn encode_pixel(&self, e: &Encoder) {
+ encode(e, self.b);
+ encode(e, self.g);
+ encode(e, self.r);
+ }
+ fn same_pixel(&self, pix2: &rgb32_pixel_t) -> bool {
+ same_pixel_rgb(self, pix2)
+ }
+ fn min_ref_encode_size(&self) -> int { 2 }
+ fn max_ref_encode_size(&self) -> int { 2 }
+}
+
+impl GlzGetters for rgb32_pixel_t {
+ fn get_r(&self) -> BYTE { ((self).r) }
+ fn get_g(&self) -> BYTE { ((self).g) }
+ fn get_b(&self) -> BYTE { ((self).b) }
+}
+
+
+
+trait PixelTrait {
+ fn pixel_dist(&self, seg: &WindowImageSegment, ref_: *Self, ref_seg: &WindowImageSegment) -> int;
+}
+
+macro_rules! pixel_dist_regular(
+    ($t: ty) => (impl PixelTrait for $t {
+        fn pixel_dist(&self, seg: &WindowImageSegment, ref_: *$t, ref_seg: &WindowImageSegment) -> int
+        {
+            pixel_dist(self as *$t, seg, ref_, ref_seg)
+        }
+    })
+)
+
+impl PixelTrait for one_byte_pixel_t {
+    fn pixel_dist(&self, seg: &WindowImageSegment, ref_: *one_byte_pixel_t, ref_seg: &WindowImageSegment) -> int
+    {
+        // TODO: pix_per_byte comes from the image type and is not yet threaded through the trait
+        pixel_dist_ppb(self as *one_byte_pixel_t, seg, ref_, ref_seg, pix_per_byte)
+    }
+}
+
+pixel_dist_regular!(rgb16_pixel_t)
+pixel_dist_regular!(rgb24_pixel_t)
+pixel_dist_regular!(rgb32_pixel_t)
+
+fn pixel_id<PIXEL>(pix_ptr: *PIXEL, seg: &WindowImageSegment) -> int
+{
+    // the C macro does pointer arithmetic in PIXEL units, hence the division by the pixel size
+    (pix_ptr as int - seg.lines as int) / size_of::<PIXEL>() as int + seg.pixels_so_far as int
+}
+
+fn pixel_dist<PIXEL>(src: *PIXEL, src_seg: &WindowImageSegment, ref_: *PIXEL,
+                     ref_seg: &WindowImageSegment) -> int
+{
+    pixel_id(src, src_seg) - pixel_id(ref_, ref_seg)
+}
+
+
+fn pixel_id_ppb<PIXEL>(pix_ptr: *PIXEL, seg: &WindowImageSegment, pix_per_byte: int) -> int
+{
+    (pix_ptr as int - seg.lines as int) * pix_per_byte + seg.pixels_so_far as int
+}
+
+fn pixel_dist_ppb<PIXEL>(src: *PIXEL, src_seg: &WindowImageSegment, ref_: *PIXEL,
+                         ref_seg: &WindowImageSegment, pix_per_byte: int) -> int
+{
+    (pixel_id_ppb(src, src_seg, pix_per_byte) -
+     pixel_id_ppb(ref_, ref_seg, pix_per_byte)) / pix_per_byte
+}
+
+/*
+ * TODO: moving pix_per_byte which is not required for many pixels to avoid duplicating do_match.
+ * pix_per_byte was added/removed by LZ_PLT macro in C version.
+ */
+
+/* returns the length of the match. 0 if no match.
+ if image_distance = 0, pixel_distance is the distance between the matching pixels.
+ Otherwise, it is the offset from the beginning of the referred image */
+#[inline]
+fn do_match<PIXEL>(dict: *SharedDictionary,
+ ref_seg: *WindowImageSegment,
+ _ref: *PIXEL,
+ ref_limit: *PIXEL,
+ ip_seg: *WindowImageSegment,
+ ip: *PIXEL,
+ ip_limit: *PIXEL,
+ pix_per_byte: int,
+ o_image_dist: *size_t,
+ o_pix_distance: *size_t)
+ -> size_t
+{
+    let mut tmp_ip = ip;
+    let mut tmp_ref = _ref;
+
+    if (_ref > (ref_limit - MIN_REF_ENCODE_SIZE)) {
+ return 0; // in case the hash entry is not relevant
+ }
+
+
+ /* min match length == MIN_REF_ENCODE_SIZE (depends on pixel type) */
+
+
+    if tmp_ref.same_pixel_run(&mut tmp_ip) == 0 {
+        return 0;
+    }
+
+ *o_image_dist = ip_seg->image->id - ref_seg->image->id;
+
+ if (!(*o_image_dist)) { // the ref is inside the same image - encode distance
+ // in bytes
+ *o_pix_distance = pixel_dist(ip, ip_seg, ref, ref_seg, pix_per_byte);
+ } else { // the ref is at different image - encode offset from the image start
+ // in bytes
+ *o_pix_distance = pixel_dist(ref, ref_seg,
+ (PIXEL *)(dict->window.segs[ref_seg->image->first_seg].lines),
+ &dict->window.segs[ref_seg->image->first_seg],
+ pix_per_byte);
+ }
+
+ if ((*o_pix_distance == 0) || (*o_pix_distance >= MAX_PIXEL_LONG_DISTANCE) ||
+ (*o_image_dist > MAX_IMAGE_DIST)) {
+ return 0;
+ }
+
+
+ /* continue the match*/
+ while ((tmp_ip < ip_limit) && (tmp_ref < ref_limit)) {
+ if (!same_pixel(*tmp_ref, *tmp_ip)) {
+ break;
+ } else {
+ tmp_ref++;
+ tmp_ip++;
+ }
+ }
+
+
+ if ((tmp_ip - ip) > MAX_REF_ENCODE_SIZE) {
+ return (tmp_ip - ip);
+ }
+
+ let encode_size = (*ip).get_encode_ref_size(*o_image_dist, *o_pix_distance);
+
+ // min number of identical pixels for a match
+#if defined(LZ_RGB16)
+ encode_size /= 2;
+#elif defined(LZ_RGB24) || defined(LZ_RGB32)
+ encode_size /= 3;
+#endif
+
+ encode_size++; // the minimum match
+ // match len is smaller than the encoding - not worth encoding
+ if ((tmp_ip - ip) < encode_size) {
+ return 0;
+ }
+ return (tmp_ip - ip);
+}
+
+/* TODO: translate these to rust
+*
+ * Give hints to the compiler for branch prediction optimization.
+ *
+#if defined(__GNUC__) && (__GNUC__ > 2)
+#define LZ_EXPECT_CONDITIONAL(c) (__builtin_expect((c), 1))
+#define LZ_UNEXPECT_CONDITIONAL(c) (__builtin_expect((c), 0))
+#else
+#define LZ_EXPECT_CONDITIONAL(c) (c)
+#define LZ_UNEXPECT_CONDITIONAL(c) (c)
+#endif
+*/
+// TODO: simpler way to do a compile time guaranteed nop (maybe just a function?)
+macro_rules! LZ_EXPECT_CONDITIONAL(
+    ($x:expr) => ($x)
+)
+macro_rules! LZ_UNEXPECT_CONDITIONAL(
+    ($x:expr) => ($x)
+)
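+// One possible answer to the TODO above (untested sketch): an inlined identity
+// function should also compile down to a no-op:
+//
+//     #[inline(always)]
+//     fn lz_expect_conditional(c: bool) -> bool { c }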
+
+
+/* compresses one segment starting from 'from'.
+ In order to encode a match, we use pixels resolution when we encode RGB image,
+ and bytes count when we encode PLT.
+*/
+fn compress_seg<PIXEL>(encoder: &Encoder, seg_idx: uint32_t, from: *PIXEL, copied: int)
+{
+ WindowImageSegment *seg = &encoder.dict->window.segs[seg_idx];
+ const PIXEL *ip = from;
+ const PIXEL *ip_bound = (PIXEL *)(seg->lines_end) - BOUND_OFFSET;
+ const PIXEL *ip_limit = (PIXEL *)(seg->lines_end) - LIMIT_OFFSET;
+ int hval;
+ int copy = copied;
+ int pix_per_byte = PLT_PIXELS_PER_BYTE[encoder.cur_image._type];
+
+#ifdef DEBUG_ENCODE
+ int n_encoded = 0;
+#endif
+
+ if copy == 0 {
+ encode_copy_count(encoder, MAX_COPY - 1);
+ }
+
+
+ while (LZ_EXPECT_CONDITIONAL!(ip < ip_limit)) {
+ const PIXEL *ref;
+ const PIXEL *ref_limit;
+ WindowImageSegment *ref_seg;
+ uint32_t ref_seg_idx;
+ size_t pix_dist;
+ size_t image_dist;
+ /* minimum match length */
+ size_t len = 0;
+
+ /* comparison starting-point */
+ const PIXEL *anchor = ip;
+#ifdef CHAINED_HASH
+ int hash_id = 0;
+ size_t best_len = 0;
+ size_t best_pix_dist = 0;
+ size_t best_image_dist = 0;
+#endif
+
+ /* check for a run */
+
+ if (LZ_EXPECT_CONDITIONAL!(ip > (PIXEL *)(seg->lines))) {
+ if (same_pixel(ip[-1], ip[0]) && same_pixel(ip[0], ip[1]) && same_pixel(ip[1], ip[2])) {
+ PIXEL x;
+ pix_dist = 1;
+ image_dist = 0;
+
+ ip += 3;
+ ref = anchor + 2;
+ ref_limit = (PIXEL *)(seg->lines_end);
+ len = 3;
+
+ x = *ref;
+
+ while (ip < ip_bound) { // TODO: maybe separate a run from the same seg or from
+ // different ones in order to spare ref < ref_limit
+ if (!same_pixel(*ip, x)) {
+ ip++;
+ break;
+ } else {
+ ip++;
+ len++;
+ }
+ }
+
+ goto match;
+ } // END RLE MATCH
+ }
+
+ /* find potential match */
+ HASH_FUNC(hval, ip);
+
+#ifdef CHAINED_HASH
+ for (hash_id = 0; hash_id < HASH_CHAIN_SIZE; hash_id++) {
+ ref_seg_idx = encoder.dict->htab[hval][hash_id].image_seg_idx;
+#else
+ ref_seg_idx = encoder.dict->htab[hval].image_seg_idx;
+#endif
+ ref_seg = encoder.dict->window.segs + ref_seg_idx;
+ if (REF_SEG_IS_VALID(encoder.dict, encoder.id,
+ ref_seg, seg)) {
+#ifdef CHAINED_HASH
+ ref = ((PIXEL *)ref_seg->lines) + encoder.dict->htab[hval][hash_id].ref_pix_idx;
+#else
+ ref = ((PIXEL *)ref_seg->lines) + encoder.dict->htab[hval].ref_pix_idx;
+#endif
+ ref_limit = (PIXEL *)ref_seg->lines_end;
+
+ len = FNAME(do_match)(encoder.dict, ref_seg, ref, ref_limit, seg, ip, ip_bound,
+#ifdef LZ_PLT
+ pix_per_byte,
+#endif
+ &image_dist, &pix_dist);
+
+#ifdef CHAINED_HASH
+ // TODO. not compare len but rather len - encode_size
+ if (len > best_len) {
+ best_len = len;
+ best_pix_dist = pix_dist;
+ best_image_dist = image_dist;
+ }
+#endif
+ }
+
+#ifdef CHAINED_HASH
+ } // end chain loop
+ len = best_len;
+ pix_dist = best_pix_dist;
+ image_dist = best_image_dist;
+#endif
+
+ /* update hash table */
+ UPDATE_HASH(encoder.dict, hval, seg_idx, anchor - ((PIXEL *)seg->lines));
+
+ if (!len) {
+ goto literal;
+ }
+
+match: // RLE or dictionary (both are encoded by distance from ref (-1) and length)
+#ifdef DEBUG_ENCODE
+ printf(", match(%d, %d, %d)", image_dist, pix_dist, len);
+ n_encoded += len;
+#endif
+
+ /* distance is biased */
+ if (!image_dist) {
+ pix_dist--;
+ }
+
+ /* if we have copied something, adjust the copy count */
+ if (copy) {
+ /* copy is biased, '0' means 1 byte copy */
+ update_copy_count(encoder, copy - 1);
+ } else {
+ /* back, to overwrite the copy count */
+ compress_output_prev(encoder);
+ }
+
+ /* reset literal counter */
+ copy = 0;
+
+ /* length is biased, '1' means a match of 3 pixels for PLT and alpha*/
+ /* for RGB 16 1 means 2 */
+ /* for RGB24/32 1 means 1...*/
+ ip = anchor + len - 2;
+
+#if defined(LZ_RGB16)
+ len--;
+#elif defined(LZ_PLT) || defined(LZ_RGB_ALPHA)
+ len -= 2;
+#endif
+ GLZ_ASSERT(encoder.usr, len > 0);
+ encode_match(encoder, image_dist, pix_dist, len);
+
+ /* update the hash at match boundary */
+#if defined(LZ_RGB16) || defined(LZ_RGB24) || defined(LZ_RGB32)
+ if (ip > anchor) {
+#endif
+ HASH_FUNC(hval, ip);
+ UPDATE_HASH(encoder.dict, hval, seg_idx, ip - ((PIXEL *)seg->lines));
+ ip++;
+#if defined(LZ_RGB16) || defined(LZ_RGB24) || defined(LZ_RGB32)
+ } else {ip++;
+ }
+#endif
+#if defined(LZ_RGB24) || defined(LZ_RGB32)
+ if (ip > anchor) {
+#endif
+ HASH_FUNC(hval, ip);
+ UPDATE_HASH(encoder.dict, hval, seg_idx, ip - ((PIXEL *)seg->lines));
+ ip++;
+#if defined(LZ_RGB24) || defined(LZ_RGB32)
+ } else {
+ ip++;
+ }
+#endif
+ /* assuming literal copy */
+ encode_copy_count(encoder, MAX_COPY - 1);
+ continue;
+
+literal:
+#ifdef DEBUG_ENCODE
+ printf(", copy");
+ n_encoded++;
+#endif
+ encode_pixel(encoder, *anchor);
+ anchor++;
+ ip = anchor;
+ copy++;
+
+ if (LZ_UNEXPECT_CONDITIONAL!(copy == MAX_COPY)) {
+ copy = 0;
+ encode_copy_count(encoder, MAX_COPY - 1);
+ }
+ } // END LOOP (ip < ip_limit)
+
+
+ /* left-over as literal copy */
+ ip_bound++;
+ while (ip <= ip_bound) {
+#ifdef DEBUG_ENCODE
+ printf(", copy");
+ n_encoded++;
+#endif
+ encode_pixel(encoder, *ip);
+ ip++;
+ copy++;
+ if (copy == MAX_COPY) {
+ copy = 0;
+ encode_copy_count(encoder, MAX_COPY - 1);
+ }
+ }
+
+ /* if we have copied something, adjust the copy length */
+ if (copy) {
+ update_copy_count(encoder, copy - 1);
+ } else {
+ compress_output_prev(encoder);
+ }
+#ifdef DEBUG_ENCODE
+ printf("\ntotal encoded=%d\n", n_encoded);
+#endif
+}
+
+
+/* If the image is very small, just copy it.
+   Copies the first two pixels of the first segment, and sends the segments
+   one by one to compress_seg.
+   The number of bytes compressed is stored inside the encoder. */
+fn compress<PIXEL>(encoder: &Encoder)
+{
+    let mut seg_id = encoder.cur_image.first_win_seg;
+    let mut ip: *PIXEL;
+    let dict = encoder.dict;
+    let mut hval: c_int;
+
+ // fetch the first image segment that is not too small
+ while (seg_id != NULL_IMAGE_SEG_ID) &&
+ (dict.window.segs[seg_id].image.id == encoder.cur_image.id) &&
+ ((((PIXEL *)dict->window.segs[seg_id].lines_end) -
+ ((PIXEL *)dict->window.segs[seg_id].lines)) < 4) {
+ // coping the segment
+ if (dict->window.segs[seg_id].lines != dict->window.segs[seg_id].lines_end) {
+ ip = (PIXEL *)dict->window.segs[seg_id].lines;
+ // Note: we assume MAX_COPY > 3
+ encode_copy_count(encoder, (uint8_t)(
+ (((PIXEL *)dict->window.segs[seg_id].lines_end) -
+ ((PIXEL *)dict->window.segs[seg_id].lines)) - 1));
+ while (ip < (PIXEL *)dict->window.segs[seg_id].lines_end) {
+ encode_pixel(encoder, *ip);
+ ip++;
+ }
+ }
+ seg_id = dict.window.segs[seg_id].next;
+ }
+
+ if ((seg_id == NULL_IMAGE_SEG_ID) ||
+ (dict.window.segs[seg_id].image->id != encoder.cur_image.id)) {
+ return;
+ }
+
+ ip = (PIXEL *)dict.window.segs[seg_id].lines;
+
+
+ encode_copy_count(encoder, MAX_COPY - 1);
+
+ HASH_FUNC(hval, ip);
+ UPDATE_HASH(encoder.dict, hval, seg_id, 0);
+
+ encode_pixel(encoder, *ip);
+ ip++;
+ encode_pixel(encoder, *ip);
+ ip++;
+#ifdef DEBUG_ENCODE
+ printf("copy, copy");
+#endif
+ // compressing the first segment
+ FNAME(compress_seg)(encoder, seg_id, ip, 2);
+
+ // compressing the next segments
+ for (seg_id = dict->window.segs[seg_id].next;
+ seg_id != NULL_IMAGE_SEG_ID && (
+ dict->window.segs[seg_id].image->id == encoder.cur_image.id);
+ seg_id = dict->window.segs[seg_id].next) {
+ FNAME(compress_seg)(encoder, seg_id, (PIXEL *)dict->window.segs[seg_id].lines, 0);
+ }
+}
+
+/* glz_encoder.c */
+
+
+/**************************************************************************
+* Handling writing the encoded image to the output buffer
+***************************************************************************/
+#[inline]
+fn more_io_bytes(encoder: *Encoder) -> c_int
+{
+    let mut io_ptr: *uint8_t = ptr::null();
+    let num_io_bytes = (encoder.usr.more_space)(encoder.usr, &mut io_ptr);
+    encoder.io.bytes_count += num_io_bytes as size_t;
+    encoder.io.now = io_ptr;
+    encoder.io.end = unsafe { encoder.io.now.offset(num_io_bytes as int) };
+ return num_io_bytes;
+}
+
+#[inline]
+fn encode(encoder: *Encoder, byte: uint8_t)
+{
+    if encoder.io.now == encoder.io.end {
+        if more_io_bytes(encoder) <= 0 {
+            (encoder.usr.error)(encoder.usr, "encode: no more bytes\n");
+        }
+        GLZ_ASSERT(encoder.usr, encoder.io.now.is_not_null());
+    }
+
+    GLZ_ASSERT(encoder.usr, encoder.io.now < encoder.io.end);
+    unsafe {
+        *(encoder.io.now as *mut uint8_t) = byte;
+        encoder.io.now = encoder.io.now.offset(1);
+    }
+}
+
+#[inline]
+fn encode_32(encoder: *Encoder, word: c_uint)
+{
+ encode(encoder, (word >> 24) as uint8_t);
+ encode(encoder, ((word >> 16) & 0x0000ff) as uint8_t);
+ encode(encoder, ((word >> 8) & 0x0000ff) as uint8_t);
+    encode(encoder, (word & 0x0000ff) as uint8_t);
+}
+
+#[inline]
+fn encode_64(encoder: *Encoder, word: uint64_t)
+{
+ encode_32(encoder, (word >> 32) as uint32_t);
+    encode_32(encoder, (word & 0xffffffff) as uint32_t);
+}
+
+#[inline]
+fn encode_copy_count(encoder: *Encoder, copy_count: uint8_t)
+{
+    encode(encoder, copy_count);
+    // io_now cannot be the first byte of the buffer
+    encoder.io.last_copy = unsafe { encoder.io.now.offset(-1) };
+}
+
+#[inline]
+fn update_copy_count(encoder: *Encoder, copy_count: uint8_t)
+{
+    GLZ_ASSERT(encoder.usr, encoder.io.last_copy.is_not_null());
+    unsafe { *(encoder.io.last_copy as *mut uint8_t) = copy_count; }
+}
+
+// decrease the io ptr by 1
+#[inline]
+fn compress_output_prev(encoder: *Encoder)
+{
+    // io_now cannot be the first byte of the buffer
+    encoder.io.now = unsafe { encoder.io.now.offset(-1) };
+    // the function should be called only when copy count is written unnecessarily by glz_compress
+    GLZ_ASSERT(encoder.usr, encoder.io.now == encoder.io.last_copy);
+}
+
+fn encoder_reset(encoder: *Encoder, io_ptr: *uint8_t, io_ptr_end: *uint8_t) -> bool
+{
+    GLZ_ASSERT(encoder.usr, io_ptr <= io_ptr_end);
+    encoder.io.bytes_count = io_ptr_end as size_t - io_ptr as size_t;
+    encoder.io.start = io_ptr;
+    encoder.io.now = io_ptr;
+    encoder.io.end = io_ptr_end;
+    encoder.io.last_copy = ptr::null();
+
+    true
+}
+
+/**********************************************************
+* Encoding
+***********************************************************/
+
+fn glz_encoder_create(id: uint8_t, dictionary: *GlzEncDictContext,
+                      usr: *GlzEncoderUsrContext) -> *GlzEncoderContext
+{
+    // the usr callbacks are non-nullable fn pointers on the Rust side,
+    // so only the usr pointer itself needs checking here
+    if usr.is_null() {
+        return ptr::null();
+    }
+
+    let encoder = (usr.malloc)(usr, size_of::<Encoder>() as c_int) as *Encoder;
+    if encoder.is_null() {
+        return ptr::null();
+    }
+
+    encoder.id = id;
+    encoder.usr = usr;
+    encoder.dict = dictionary as *SharedDictionary;
+
+    encoder as *GlzEncoderContext
+}
+
+fn glz_encoder_destroy(opaque_encoder: *GlzEncoderContext)
+{
+    if opaque_encoder.is_null() {
+        return;
+    }
+
+    let encoder = opaque_encoder as *Encoder;
+    (encoder.usr.free)(encoder.usr, encoder as *c_void);
+}
+
+static BOUND_OFFSET: int = 2;
+static LIMIT_OFFSET: int = 6;
+static MIN_FILE_SIZE: int = 4;
+
+static MAX_PIXEL_SHORT_DISTANCE: size_t = 4096;     // (1 << 12)
+static MAX_PIXEL_MEDIUM_DISTANCE: size_t = 131072;  // (1 << 17) 2 ^ (12 + 5)
+static MAX_PIXEL_LONG_DISTANCE: size_t = 33554432;  // (1 << 25) 2 ^ (12 + 5 + 8)
+static MAX_IMAGE_DIST: size_t = 16777215;           // (1 << 24) - 1
+
+
+//#define DEBUG_ENCODE
+
+/* glz_encode_match_tmpl.c */
+
+static SHORT_PIX_IMAGE_DIST_LEVEL_1: uint32_t = 64;       // (1 << 6)
+static SHORT_PIX_IMAGE_DIST_LEVEL_2: uint32_t = 16384;    // (1 << 14)
+static SHORT_PIX_IMAGE_DIST_LEVEL_3: uint32_t = 4194304;  // (1 << 22)
+static FAR_PIX_IMAGE_DIST_LEVEL_1: uint32_t = 256;        // (1 << 8)
+static FAR_PIX_IMAGE_DIST_LEVEL_2: uint32_t = 65536;      // (1 << 16)
+static FAR_PIX_IMAGE_DIST_LEVEL_3: uint32_t = 16777216;   // (1 << 24)
+
+
+fn get_encode_ref_size_helper(image_distance: uint32_t, pixel_distance: size_t) -> c_int
+{
+ let mut encode_size = 0;
+
+ if pixel_distance < MAX_PIXEL_SHORT_DISTANCE {
+ if image_distance < SHORT_PIX_IMAGE_DIST_LEVEL_1 {
+ encode_size = 3;
+ } else if image_distance < SHORT_PIX_IMAGE_DIST_LEVEL_2 {
+ encode_size = 4;
+ } else if image_distance < SHORT_PIX_IMAGE_DIST_LEVEL_3 {
+ encode_size = 5;
+ } else {
+ encode_size = 6;
+ }
+ } else {
+ /* the third MSB bit indicates if the pixel_distance is medium/long*/
+        let long_dist_control = if pixel_distance < MAX_PIXEL_MEDIUM_DISTANCE { 0 } else { 32 };
+ if image_distance == 0 {
+ encode_size = 3;
+ } else if image_distance < FAR_PIX_IMAGE_DIST_LEVEL_1 {
+ encode_size = 4;
+ } else if image_distance < FAR_PIX_IMAGE_DIST_LEVEL_2 {
+ encode_size = 5;
+ } else {
+ encode_size = 6;
+ }
+
+        if long_dist_control != 0 {
+ encode_size += 1;
+ }
+ }
+
+ return encode_size;
+}
+
+/* if image_distance = 0, pixel_distance is the distance between the matching pixels.
+ Otherwise, it is the offset from the beginning of the referred image */
+fn encode_match(encoder: *Encoder, image_distance: uint32_t,
+                pixel_distance: size_t, mut len: size_t)
+{
+ /* encoding the match length + Long/Short dist bit + 12 LSB pixels of pixel_distance*/
+ if len < 7 {
+ if pixel_distance < MAX_PIXEL_SHORT_DISTANCE {
+ encode(encoder, ((len << 5) + (pixel_distance & 0x0f)) as uint8_t);
+ } else {
+ encode(encoder, ((len << 5) + 16 + (pixel_distance & 0x0f)) as uint8_t);
+ }
+ encode(encoder, ((pixel_distance >> 4) & 255) as uint8_t);
+ } else {
+ if pixel_distance < MAX_PIXEL_SHORT_DISTANCE {
+ encode(encoder, ((7 << 5) + (pixel_distance & 0x0f)) as uint8_t);
+ } else {
+ encode(encoder, ((7 << 5) + 16 + (pixel_distance & 0x0f)) as uint8_t);
+ }
+ len -= 7;
+ while len >= 255 {
+ encode(encoder, 255);
+ len -= 255;
+ }
+ encode(encoder, len as uint8_t);
+ encode(encoder, ((pixel_distance >> 4) & 255) as uint8_t);
+ }
+
+ /* encoding the rest of the pixel distance and the image_dist and its 2 control bits */
+
+ /* The first 2 MSB bits indicate how many more bytes should be read for image dist */
+ if pixel_distance < MAX_PIXEL_SHORT_DISTANCE {
+ if image_distance < SHORT_PIX_IMAGE_DIST_LEVEL_1 {
+ encode(encoder, (image_distance & 0x3f) as uint8_t);
+ } else if image_distance < SHORT_PIX_IMAGE_DIST_LEVEL_2 {
+ encode(encoder, ((1 << 6) + (image_distance & 0x3f)) as uint8_t);
+ encode(encoder, ((image_distance >> 6) & 255) as uint8_t);
+ } else if image_distance < SHORT_PIX_IMAGE_DIST_LEVEL_3 {
+ encode(encoder, ((1 << 7) + (image_distance & 0x3f)) as uint8_t);
+ encode(encoder, ((image_distance >> 6) & 255) as uint8_t);
+ encode(encoder, ((image_distance >> 14) & 255) as uint8_t);
+ } else {
+ encode(encoder, ((1 << 7) + (1 << 6) + (image_distance & 0x3f)) as uint8_t);
+ encode(encoder, ((image_distance >> 6) & 255) as uint8_t);
+ encode(encoder, ((image_distance >> 14) & 255) as uint8_t);
+ encode(encoder, ((image_distance >> 22) & 255) as uint8_t);
+ }
+ } else {
+ /* the third MSB bit indicates if the pixel_distance is medium/long*/
+        let long_dist_control = if pixel_distance < MAX_PIXEL_MEDIUM_DISTANCE { 0 } else { 32 };
+ if image_distance == 0 {
+ encode(encoder, (long_dist_control + ((pixel_distance >> 12) & 31)) as uint8_t);
+ } else if image_distance < FAR_PIX_IMAGE_DIST_LEVEL_1 {
+ encode(encoder,
+ (long_dist_control + (1 << 6) + ((pixel_distance >> 12) & 31)) as uint8_t);
+ encode(encoder, (image_distance & 255) as uint8_t);
+ } else if image_distance < FAR_PIX_IMAGE_DIST_LEVEL_2 {
+ encode(encoder,
+ (long_dist_control + (1 << 7) + ((pixel_distance >> 12) & 31)) as uint8_t);
+ encode(encoder, (image_distance & 255) as uint8_t);
+ encode(encoder, ((image_distance >> 8) & 255) as uint8_t);
+ } else {
+ encode(encoder,
+ (long_dist_control + (1 << 7) + (1 << 6) +
+ ((pixel_distance >> 12) & 31)) as uint8_t);
+ encode(encoder, (image_distance & 255) as uint8_t);
+ encode(encoder, ((image_distance >> 8) & 255) as uint8_t);
+ encode(encoder, ((image_distance >> 16) & 255) as uint8_t);
+ }
+
+        if long_dist_control != 0 {
+ encode(encoder, ((pixel_distance >> 17) & 255) as uint8_t);
+ }
+ }
+}
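+// Worked example (reading the code above): len = 5, pixel_distance = 10, image_distance = 0
+// emits three bytes: (5 << 5) + (10 & 0x0f) = 0xaa, then (10 >> 4) & 255 = 0x00,
+// then image_distance & 0x3f = 0x00.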
+
+fn glz_encode(opaque_encoder: *GlzEncoderContext,
+ the_type: LzImageType, width: c_int, height: c_int, top_down: c_int,
+ lines: *uint8_t, num_lines: c_uint, stride: c_int,
+ io_ptr: *uint8_t, num_io_bytes: c_uint,
+ usr_context: *GlzUsrImageContext, o_enc_dict_context: **GlzEncDictImageContext)
+ -> int
+{
+    let encoder = opaque_encoder as *Encoder;
+    let dict_image: *WindowImage;
+    let io_ptr_end: *uint8_t = unsafe { io_ptr.offset(num_io_bytes as int) };
+    let mut win_head_image_dist: uint32_t = 0;
+
+    if IS_IMAGE_TYPE_PLT[the_type as uint] {
+        if stride > (width / PLT_PIXELS_PER_BYTE[the_type as uint]) {
+            if ((width % PLT_PIXELS_PER_BYTE[the_type as uint]) == 0) || (
+                    (stride - (width / PLT_PIXELS_PER_BYTE[the_type as uint])) > 1) {
+                (encoder.usr.error)(encoder.usr, "stride overflows (plt)\n");
+            }
+        }
+    } else {
+        if stride != width * RGB_BYTES_PER_PIXEL[the_type as uint] {
+            (encoder.usr.error)(encoder.usr, "stride != width*bytes_per_pixel (rgb)\n");
+        }
+    }
+
+ // assign the output buffer
+ if !encoder_reset(encoder, io_ptr, io_ptr_end) {
+        (encoder.usr.error)(encoder.usr, "lz encoder io reset failed\n");
+ }
+
+ // first read the list of the image segments into the dictionary window
+ dict_image = glz_dictionary_pre_encode(encoder.id, encoder.usr,
+ encoder.dict, the_type, width, height, stride,
+                                            lines, num_lines, usr_context, &mut win_head_image_dist);
+ *o_enc_dict_context = dict_image as *GlzEncDictImageContext;
+
+ encoder.cur_image._type = the_type;
+    encoder.cur_image.id = dict_image.id;
+    encoder.cur_image.first_win_seg = dict_image.first_seg;
+
+ encode_32(encoder, LZ_MAGIC);
+ encode_32(encoder, LZ_VERSION);
+    if top_down != 0 {
+ encode(encoder, (the_type & LZ_IMAGE_TYPE_MASK) | (1 << LZ_IMAGE_TYPE_LOG));
+ } else {
+ encode(encoder, (the_type & LZ_IMAGE_TYPE_MASK));
+ }
+
+ encode_32(encoder, width);
+ encode_32(encoder, height);
+ encode_32(encoder, stride);
+ encode_64(encoder, dict_image.id);
+ encode_32(encoder, win_head_image_dist);
+
+    match encoder.cur_image._type {
+        LZ_IMAGE_TYPE_PLT1_BE |
+        LZ_IMAGE_TYPE_PLT1_LE |
+        LZ_IMAGE_TYPE_PLT4_BE |
+        LZ_IMAGE_TYPE_PLT4_LE |
+        LZ_IMAGE_TYPE_PLT8 =>
+            compress::<one_byte_pixel_t>(encoder),
+        LZ_IMAGE_TYPE_RGB16 =>
+            compress::<rgb16_pixel_t>(encoder),
+        LZ_IMAGE_TYPE_RGB24 =>
+            compress::<rgb24_pixel_t>(encoder),
+        LZ_IMAGE_TYPE_RGB32 =>
+            compress::<rgb32_pixel_t>(encoder),
+        LZ_IMAGE_TYPE_RGBA => {
+            compress::<rgb32_pixel_t>(encoder);
+            compress::<RgbAlpha>(encoder);
+        },
+        _ => // LZ_IMAGE_TYPE_INVALID; TODO: drop the catchall
+            (encoder.usr.error)(encoder.usr, "bad image type\n"),
+    }
+
+ glz_dictionary_post_encode(encoder.id, encoder.usr, encoder.dict);
+
+ // move all the used segments to the free ones
+    encoder.io.bytes_count -= encoder.io.end as size_t - encoder.io.now as size_t;
+
+    return encoder.io.bytes_count as int;
+}
diff --git a/common/rust_translation_notes.txt b/common/rust_translation_notes.txt
new file mode 100644
index 0000000..ece37a0
--- /dev/null
+++ b/common/rust_translation_notes.txt
@@ -0,0 +1,56 @@
+#ifdef LZ_PLT
+#define PIXEL one_byte_pixel_t
+#define FNAME(name) glz_plt_##name
+#define ENCODE_PIXEL(e, pix) encode(e, (pix).a) // gets the pixel and write only the needed bytes
+ // from the pixel
+#define SAME_PIXEL(pix1, pix2) ((pix1).a == (pix2).a)
+#define MIN_REF_ENCODE_SIZE 4
+#define MAX_REF_ENCODE_SIZE 7
+#define HASH_FUNC(v, p) { \
+ v = DJB2_START; \
+ DJB2_HASH(v, p[0].a); \
+ DJB2_HASH(v, p[1].a); \
+ DJB2_HASH(v, p[2].a); \
+ v &= HASH_MASK; \
+ }
+#endif
+
+is going to become:
+
+templated (generic) functions for anything that looks like a function, using references.
+Then I will make sure those functions are inlined.
+Assuming inlining happens early enough, the resulting code should be the same (!citation/proof needed!).
+
+All functions produced by FNAME are to become generic functions. They are all static in C and so are
+called directly by glz_encode.
+
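+A minimal sketch of that shape (hypothetical names, not the actual glz_encoder.rs code):
+
+    trait Pixel {
+        fn encode_pixel(&self, out: &mut Vec<u8>);
+    }
+
+    #[inline]
+    fn encode_all<P: Pixel>(out: &mut Vec<u8>, pixels: &[P]) {
+        // one monomorphized copy per pixel type, like the FNAME()-expanded C functions
+        for p in pixels.iter() {
+            p.encode_pixel(out);
+        }
+    }
+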
+The only remaining problem is optionally compiled code, e.g.:
+
+fn if_i_only_could()
+{
+#ifdef PLT
+ encode(magic_plt);
+#endif
+ encode(always_encode_this);
+}
+
+PIXEL type
+FNAME(name) -> traits
+
+Problems
+========
+
+no nested structs
+no variadic argument C ABI (but can call external variadic argument C ABI fine, i.e. libc printf)
+no preprocessor. poor grasp of traits
+hard to use pointers. or I just don't like offset.
+
+C to Rust
+=========
+
+static void f()
+=>
+fn f()
+
+static by default - that is, per compilation unit, which here is the crate? Anyway, you need to 'pub'
+whatever you want visible outside (i.e. in the symbol table)
+
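+For example (illustrative only; names made up):
+
+    // C: static void helper(void) {}       /* file-local */
+    fn helper() {}                          // crate-private by default
+
+    // C: void exported_entry(void) {}      /* exported */
+    pub fn exported_entry() {}              // 'pub' makes it visible outside the crate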