Move common structs for TupleVariation from gvar to var-common.hh

Also added a table_base field to the iterator, and a corresponding
parameter to the related functions, to handle the different start
addresses that the serialized-data offset is resolved against in
cvar and gvar.
Qunxin Liu 2023-02-06 12:36:05 -08:00 committed by Behdad Esfahbod
parent c0fac016dc
commit 22cc73f3e9
2 changed files with 312 additions and 305 deletions
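For illustration (a sketch, not part of the diff), this is the address computation the iterator now performs, matching get_serialized_data () in the code below; table_base is the start of the per-glyph GlyphVariationData in gvar, but would be the start of the whole table in cvar, whose data offset is table-relative:

const HBUINT8 *p = &(table_base + var_data->data) + data_offset;
/* table_base: start of the per-glyph GlyphVariationData in gvar;
 * assumed to be the start of the cvar table for a future cvar user. */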


@@ -237,6 +237,314 @@ struct VarStoreInstancer
hb_array_t<int> coords;
};
/* https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#tuplevariationheader */
struct TupleVariationHeader
{
unsigned get_size (unsigned axis_count) const
{ return min_size + get_all_tuples (axis_count).get_size (); }
unsigned get_data_size () const { return varDataSize; }
const TupleVariationHeader &get_next (unsigned axis_count) const
{ return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); }
float calculate_scalar (hb_array_t<int> coords, unsigned int coord_count,
const hb_array_t<const F2DOT14> shared_tuples) const
{
hb_array_t<const F2DOT14> peak_tuple;
if (has_peak ())
peak_tuple = get_peak_tuple (coord_count);
else
{
unsigned int index = get_index ();
if (unlikely (index * coord_count >= shared_tuples.length))
return 0.f;
peak_tuple = shared_tuples.sub_array (coord_count * index, coord_count);
}
hb_array_t<const F2DOT14> start_tuple;
hb_array_t<const F2DOT14> end_tuple;
if (has_intermediate ())
{
start_tuple = get_start_tuple (coord_count);
end_tuple = get_end_tuple (coord_count);
}
float scalar = 1.f;
for (unsigned int i = 0; i < coord_count; i++)
{
int v = coords[i];
int peak = peak_tuple[i].to_int ();
if (!peak || v == peak) continue;
if (has_intermediate ())
{
int start = start_tuple[i].to_int ();
int end = end_tuple[i].to_int ();
if (unlikely (start > peak || peak > end ||
(start < 0 && end > 0 && peak))) continue;
if (v < start || v > end) return 0.f;
if (v < peak)
{ if (peak != start) scalar *= (float) (v - start) / (peak - start); }
else
{ if (peak != end) scalar *= (float) (end - v) / (end - peak); }
}
else if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.f;
else
scalar *= (float) v / peak;
}
return scalar;
}
bool has_peak () const { return tupleIndex & TuppleIndex::EmbeddedPeakTuple; }
bool has_intermediate () const { return tupleIndex & TuppleIndex::IntermediateRegion; }
bool has_private_points () const { return tupleIndex & TuppleIndex::PrivatePointNumbers; }
unsigned get_index () const { return tupleIndex & TuppleIndex::TupleIndexMask; }
protected:
struct TuppleIndex : HBUINT16
{
enum Flags {
EmbeddedPeakTuple = 0x8000u,
IntermediateRegion = 0x4000u,
PrivatePointNumbers = 0x2000u,
TupleIndexMask = 0x0FFFu
};
DEFINE_SIZE_STATIC (2);
};
hb_array_t<const F2DOT14> get_all_tuples (unsigned axis_count) const
{ return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).as_array ((has_peak () + has_intermediate () * 2) * axis_count); }
hb_array_t<const F2DOT14> get_peak_tuple (unsigned axis_count) const
{ return get_all_tuples (axis_count).sub_array (0, axis_count); }
hb_array_t<const F2DOT14> get_start_tuple (unsigned axis_count) const
{ return get_all_tuples (axis_count).sub_array (has_peak () * axis_count, axis_count); }
hb_array_t<const F2DOT14> get_end_tuple (unsigned axis_count) const
{ return get_all_tuples (axis_count).sub_array (has_peak () * axis_count + axis_count, axis_count); }
HBUINT16 varDataSize; /* The size in bytes of the serialized
* data for this tuple variation table. */
TuppleIndex tupleIndex; /* A packed field. The high 4 bits are flags (see below).
The low 12 bits are an index into a shared tuple
records array. */
/* UnsizedArrayOf<F2DOT14> peakTuple - optional */
/* Peak tuple record for this tuple variation table — optional,
* determined by flags in the tupleIndex value.
*
* Note that this must always be included in the 'cvar' table. */
/* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */
/* Intermediate start tuple record for this tuple variation table — optional,
determined by flags in the tupleIndex value. */
/* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */
/* Intermediate end tuple record for this tuple variation table — optional,
* determined by flags in the tupleIndex value. */
public:
DEFINE_SIZE_MIN (4);
};
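As a worked illustration of the per-axis math in calculate_scalar () above (a hedged sketch, not part of this commit; axis_scalar is a hypothetical helper restating only the non-intermediate branch):

/* Illustration only: the non-intermediate branch of calculate_scalar (),
 * as a hypothetical standalone helper over normalized 2.14 values.
 * E.g. peak = 8192 (0.5) and v = 4096 (0.25) contribute 0.5 to the product;
 * any v outside [hb_min (0, peak), hb_max (0, peak)] zeroes the scalar. */
static float axis_scalar (int v, int peak)
{
  if (!peak || v == peak) return 1.f;
  if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.f;
  return (float) v / peak;
}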
struct TupleVariationData
{
const TupleVariationHeader &get_tuple_var_header (void) const
{ return StructAfter<TupleVariationHeader> (data); }
struct tuple_iterator_t
{
void init (hb_bytes_t var_data_bytes_, unsigned int axis_count_, const void *table_base_)
{
var_data_bytes = var_data_bytes_;
var_data = var_data_bytes_.as<TupleVariationData> ();
index = 0;
axis_count = axis_count_;
current_tuple = &var_data->get_tuple_var_header ();
data_offset = 0;
table_base = table_base_;
}
bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */)
{
if (var_data->has_shared_point_numbers ())
{
const HBUINT8 *base = &(table_base+var_data->data);
const HBUINT8 *p = base;
if (!unpack_points (p, shared_indices, (const HBUINT8 *) (var_data_bytes.arrayZ + var_data_bytes.length))) return false;
data_offset = p - base;
}
return true;
}
bool is_valid () const
{
return (index < var_data->tupleVarCount.get_count ()) &&
var_data_bytes.check_range (current_tuple, TupleVariationHeader::min_size) &&
var_data_bytes.check_range (current_tuple, hb_max (current_tuple->get_data_size (),
current_tuple->get_size (axis_count)));
}
bool move_to_next ()
{
data_offset += current_tuple->get_data_size ();
current_tuple = &current_tuple->get_next (axis_count);
index++;
return is_valid ();
}
const HBUINT8 *get_serialized_data () const
{ return &(table_base+var_data->data) + data_offset; }
private:
const TupleVariationData *var_data;
unsigned int index;
unsigned int axis_count;
unsigned int data_offset;
const void *table_base;
public:
hb_bytes_t var_data_bytes;
const TupleVariationHeader *current_tuple;
};
static bool get_tuple_iterator (hb_bytes_t var_data_bytes, unsigned axis_count,
const void *table_base,
hb_vector_t<unsigned int> &shared_indices /* OUT */,
tuple_iterator_t *iterator /* OUT */)
{
iterator->init (var_data_bytes, axis_count, table_base);
if (!iterator->get_shared_indices (shared_indices))
return false;
return iterator->is_valid ();
}
bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); }
static bool unpack_points (const HBUINT8 *&p /* IN/OUT */,
hb_vector_t<unsigned int> &points /* OUT */,
const HBUINT8 *end)
{
enum packed_point_flag_t
{
POINTS_ARE_WORDS = 0x80,
POINT_RUN_COUNT_MASK = 0x7F
};
if (unlikely (p + 1 > end)) return false;
unsigned count = *p++;
if (count & POINTS_ARE_WORDS)
{
if (unlikely (p + 1 > end)) return false;
count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
}
if (unlikely (!points.resize (count, false))) return false;
unsigned n = 0;
unsigned i = 0;
while (i < count)
{
if (unlikely (p + 1 > end)) return false;
unsigned control = *p++;
unsigned run_count = (control & POINT_RUN_COUNT_MASK) + 1;
if (unlikely (i + run_count > count)) return false;
unsigned j;
if (control & POINTS_ARE_WORDS)
{
if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
for (j = 0; j < run_count; j++, i++)
{
n += *(const HBUINT16 *)p;
points.arrayZ[i] = n;
p += HBUINT16::static_size;
}
}
else
{
if (unlikely (p + run_count > end)) return false;
for (j = 0; j < run_count; j++, i++)
{
n += *p++;
points.arrayZ[i] = n;
}
}
}
return true;
}
static bool unpack_deltas (const HBUINT8 *&p /* IN/OUT */,
hb_vector_t<int> &deltas /* IN/OUT */,
const HBUINT8 *end)
{
enum packed_delta_flag_t
{
DELTAS_ARE_ZERO = 0x80,
DELTAS_ARE_WORDS = 0x40,
DELTA_RUN_COUNT_MASK = 0x3F
};
unsigned i = 0;
unsigned count = deltas.length;
while (i < count)
{
if (unlikely (p + 1 > end)) return false;
unsigned control = *p++;
unsigned run_count = (control & DELTA_RUN_COUNT_MASK) + 1;
if (unlikely (i + run_count > count)) return false;
unsigned j;
if (control & DELTAS_ARE_ZERO)
{
for (j = 0; j < run_count; j++, i++)
deltas.arrayZ[i] = 0;
}
else if (control & DELTAS_ARE_WORDS)
{
if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
for (j = 0; j < run_count; j++, i++)
{
deltas.arrayZ[i] = * (const HBINT16 *) p;
p += HBUINT16::static_size;
}
}
else
{
if (unlikely (p + run_count > end)) return false;
for (j = 0; j < run_count; j++, i++)
{
deltas.arrayZ[i] = * (const HBINT8 *) p++;
}
}
}
return true;
}
bool has_data () const { return tupleVarCount; }
protected:
struct TupleVarCount : HBUINT16
{
bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); }
unsigned int get_count () const { return (*this) & CountMask; }
protected:
enum Flags
{
SharedPointNumbers= 0x8000u,
CountMask = 0x0FFFu
};
public:
DEFINE_SIZE_STATIC (2);
};
TupleVarCount tupleVarCount; /* A packed field. The high 4 bits are flags, and the
* low 12 bits are the number of tuple variation tables
* for this glyph. The number of tuple variation tables
* can be any number between 1 and 4095. */
Offset16To<HBUINT8>
data; /* Offset from the start of the base table
* to the serialized data. */
/* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */
public:
DEFINE_SIZE_MIN (4);
};
} /* namespace OT */
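For reference, a worked decoding of the packed formats consumed by unpack_points () and unpack_deltas () above (illustrative bytes, not from the commit):

/* Packed point numbers, as read by unpack_points ():
 *   0x03            count = 3 (high bit clear, so a one-byte count)
 *   0x02            control: byte run, run_count = (0x02 & 0x7F) + 1 = 3
 *   0x01 0x02 0x03  point numbers accumulate: points = {1, 3, 6}
 *
 * Packed deltas, as read by unpack_deltas () into a 6-element vector:
 *   0x82            DELTAS_ARE_ZERO, run_count = 3: deltas {0, 0, 0}
 *   0x40 0x12 0x34  DELTAS_ARE_WORDS, run_count = 1: delta 0x1234 = 4660
 *   0x01 0xFF 0x05  byte run, run_count = 2: signed deltas -1, 5
 * Result: deltas = {0, 0, 0, 4660, -1, 5}. */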


@@ -29,6 +29,7 @@
#define HB_OT_VAR_GVAR_TABLE_HH
#include "hb-open-type.hh"
#include "hb-ot-var-common.hh"
/*
* gvar -- Glyph Variation Table
@@ -90,311 +91,8 @@ struct contour_point_vector_t : hb_vector_t<contour_point_t>
}
};
/* https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#tuplevariationheader */
struct TupleVariationHeader
{
unsigned get_size (unsigned axis_count) const
{ return min_size + get_all_tuples (axis_count).get_size (); }
unsigned get_data_size () const { return varDataSize; }
const TupleVariationHeader &get_next (unsigned axis_count) const
{ return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); }
float calculate_scalar (hb_array_t<int> coords, unsigned int coord_count,
const hb_array_t<const F2DOT14> shared_tuples) const
{
hb_array_t<const F2DOT14> peak_tuple;
if (has_peak ())
peak_tuple = get_peak_tuple (coord_count);
else
{
unsigned int index = get_index ();
if (unlikely (index * coord_count >= shared_tuples.length))
return 0.f;
peak_tuple = shared_tuples.sub_array (coord_count * index, coord_count);
}
hb_array_t<const F2DOT14> start_tuple;
hb_array_t<const F2DOT14> end_tuple;
if (has_intermediate ())
{
start_tuple = get_start_tuple (coord_count);
end_tuple = get_end_tuple (coord_count);
}
float scalar = 1.f;
for (unsigned int i = 0; i < coord_count; i++)
{
int v = coords[i];
int peak = peak_tuple[i].to_int ();
if (!peak || v == peak) continue;
if (has_intermediate ())
{
int start = start_tuple[i].to_int ();
int end = end_tuple[i].to_int ();
if (unlikely (start > peak || peak > end ||
(start < 0 && end > 0 && peak))) continue;
if (v < start || v > end) return 0.f;
if (v < peak)
{ if (peak != start) scalar *= (float) (v - start) / (peak - start); }
else
{ if (peak != end) scalar *= (float) (end - v) / (end - peak); }
}
else if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.f;
else
scalar *= (float) v / peak;
}
return scalar;
}
bool has_peak () const { return tupleIndex & TuppleIndex::EmbeddedPeakTuple; }
bool has_intermediate () const { return tupleIndex & TuppleIndex::IntermediateRegion; }
bool has_private_points () const { return tupleIndex & TuppleIndex::PrivatePointNumbers; }
unsigned get_index () const { return tupleIndex & TuppleIndex::TupleIndexMask; }
protected:
struct TuppleIndex : HBUINT16
{
enum Flags {
EmbeddedPeakTuple = 0x8000u,
IntermediateRegion = 0x4000u,
PrivatePointNumbers = 0x2000u,
TupleIndexMask = 0x0FFFu
};
DEFINE_SIZE_STATIC (2);
};
hb_array_t<const F2DOT14> get_all_tuples (unsigned axis_count) const
{ return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).as_array ((has_peak () + has_intermediate () * 2) * axis_count); }
hb_array_t<const F2DOT14> get_peak_tuple (unsigned axis_count) const
{ return get_all_tuples (axis_count).sub_array (0, axis_count); }
hb_array_t<const F2DOT14> get_start_tuple (unsigned axis_count) const
{ return get_all_tuples (axis_count).sub_array (has_peak () * axis_count, axis_count); }
hb_array_t<const F2DOT14> get_end_tuple (unsigned axis_count) const
{ return get_all_tuples (axis_count).sub_array (has_peak () * axis_count + axis_count, axis_count); }
HBUINT16 varDataSize; /* The size in bytes of the serialized
* data for this tuple variation table. */
TuppleIndex tupleIndex; /* A packed field. The high 4 bits are flags (see below).
The low 12 bits are an index into a shared tuple
records array. */
/* UnsizedArrayOf<F2DOT14> peakTuple - optional */
/* Peak tuple record for this tuple variation table — optional,
* determined by flags in the tupleIndex value.
*
* Note that this must always be included in the 'cvar' table. */
/* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */
/* Intermediate start tuple record for this tuple variation table — optional,
determined by flags in the tupleIndex value. */
/* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */
/* Intermediate end tuple record for this tuple variation table — optional,
* determined by flags in the tupleIndex value. */
public:
DEFINE_SIZE_MIN (4);
};
struct GlyphVariationData
{
const TupleVariationHeader &get_tuple_var_header (void) const
{ return StructAfter<TupleVariationHeader> (data); }
struct tuple_iterator_t
{
void init (hb_bytes_t var_data_bytes_, unsigned int axis_count_)
{
var_data_bytes = var_data_bytes_;
var_data = var_data_bytes_.as<GlyphVariationData> ();
index = 0;
axis_count = axis_count_;
current_tuple = &var_data->get_tuple_var_header ();
data_offset = 0;
}
bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */)
{
if (var_data->has_shared_point_numbers ())
{
const HBUINT8 *base = &(var_data+var_data->data);
const HBUINT8 *p = base;
if (!unpack_points (p, shared_indices, (const HBUINT8 *) (var_data_bytes.arrayZ + var_data_bytes.length))) return false;
data_offset = p - base;
}
return true;
}
bool is_valid () const
{
return (index < var_data->tupleVarCount.get_count ()) &&
var_data_bytes.check_range (current_tuple, TupleVariationHeader::min_size) &&
var_data_bytes.check_range (current_tuple, hb_max (current_tuple->get_data_size (),
current_tuple->get_size (axis_count)));
}
bool move_to_next ()
{
data_offset += current_tuple->get_data_size ();
current_tuple = &current_tuple->get_next (axis_count);
index++;
return is_valid ();
}
const HBUINT8 *get_serialized_data () const
{ return &(var_data+var_data->data) + data_offset; }
private:
const GlyphVariationData *var_data;
unsigned int index;
unsigned int axis_count;
unsigned int data_offset;
public:
hb_bytes_t var_data_bytes;
const TupleVariationHeader *current_tuple;
};
static bool get_tuple_iterator (hb_bytes_t var_data_bytes, unsigned axis_count,
hb_vector_t<unsigned int> &shared_indices /* OUT */,
tuple_iterator_t *iterator /* OUT */)
{
iterator->init (var_data_bytes, axis_count);
if (!iterator->get_shared_indices (shared_indices))
return false;
return iterator->is_valid ();
}
bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); }
static bool unpack_points (const HBUINT8 *&p /* IN/OUT */,
hb_vector_t<unsigned int> &points /* OUT */,
const HBUINT8 *end)
{
enum packed_point_flag_t
{
POINTS_ARE_WORDS = 0x80,
POINT_RUN_COUNT_MASK = 0x7F
};
if (unlikely (p + 1 > end)) return false;
unsigned count = *p++;
if (count & POINTS_ARE_WORDS)
{
if (unlikely (p + 1 > end)) return false;
count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
}
if (unlikely (!points.resize (count, false))) return false;
unsigned n = 0;
unsigned i = 0;
while (i < count)
{
if (unlikely (p + 1 > end)) return false;
unsigned control = *p++;
unsigned run_count = (control & POINT_RUN_COUNT_MASK) + 1;
if (unlikely (i + run_count > count)) return false;
unsigned j;
if (control & POINTS_ARE_WORDS)
{
if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
for (j = 0; j < run_count; j++, i++)
{
n += *(const HBUINT16 *)p;
points.arrayZ[i] = n;
p += HBUINT16::static_size;
}
}
else
{
if (unlikely (p + run_count > end)) return false;
for (j = 0; j < run_count; j++, i++)
{
n += *p++;
points.arrayZ[i] = n;
}
}
}
return true;
}
static bool unpack_deltas (const HBUINT8 *&p /* IN/OUT */,
hb_vector_t<int> &deltas /* IN/OUT */,
const HBUINT8 *end)
{
enum packed_delta_flag_t
{
DELTAS_ARE_ZERO = 0x80,
DELTAS_ARE_WORDS = 0x40,
DELTA_RUN_COUNT_MASK = 0x3F
};
unsigned i = 0;
unsigned count = deltas.length;
while (i < count)
{
if (unlikely (p + 1 > end)) return false;
unsigned control = *p++;
unsigned run_count = (control & DELTA_RUN_COUNT_MASK) + 1;
if (unlikely (i + run_count > count)) return false;
unsigned j;
if (control & DELTAS_ARE_ZERO)
{
for (j = 0; j < run_count; j++, i++)
deltas.arrayZ[i] = 0;
}
else if (control & DELTAS_ARE_WORDS)
{
if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
for (j = 0; j < run_count; j++, i++)
{
deltas.arrayZ[i] = * (const HBINT16 *) p;
p += HBUINT16::static_size;
}
}
else
{
if (unlikely (p + run_count > end)) return false;
for (j = 0; j < run_count; j++, i++)
{
deltas.arrayZ[i] = * (const HBINT8 *) p++;
}
}
}
return true;
}
bool has_data () const { return tupleVarCount; }
protected:
struct TupleVarCount : HBUINT16
{
bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); }
unsigned int get_count () const { return (*this) & CountMask; }
protected:
enum Flags
{
SharedPointNumbers= 0x8000u,
CountMask = 0x0FFFu
};
public:
DEFINE_SIZE_STATIC (2);
};
TupleVarCount tupleVarCount; /* A packed field. The high 4 bits are flags, and the
* low 12 bits are the number of tuple variation tables
* for this glyph. The number of tuple variation tables
* can be any number between 1 and 4095. */
Offset16To<HBUINT8>
data; /* Offset from the start of the GlyphVariationData table
* to the serialized data. */
/* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */
public:
DEFINE_SIZE_MIN (4);
};
struct GlyphVariationData : TupleVariationData
{};
struct gvar
{
@@ -561,6 +259,7 @@ struct gvar
hb_vector_t<unsigned int> shared_indices;
GlyphVariationData::tuple_iterator_t iterator;
if (!GlyphVariationData::get_tuple_iterator (var_data_bytes, table->axisCount,
var_data_bytes.arrayZ,
shared_indices, &iterator))
return true; /* so isn't applied at all */
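The hunk above is the only behavioral change on the gvar side: the byte range's own start is passed as table_base, preserving the previous offset arithmetic. A hypothetical cvar counterpart (an assumption; cvar support is not part of this diff, and cvar_table / axis_count are placeholder names) might look like:

/* Hypothetical cvar call site (assumption, not in this commit): cvar's
 * serialized-data offset is relative to the start of the cvar table,
 * so the table pointer itself is passed as table_base. */
hb_vector_t<unsigned int> shared_indices;
TupleVariationData::tuple_iterator_t iterator;
if (!TupleVariationData::get_tuple_iterator (var_data_bytes, axis_count,
                                             cvar_table /* table_base */,
                                             shared_indices, &iterator))
  return false;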