/*
 * Copyright © 2007,2008,2009,2010 Red Hat, Inc.
 * Copyright © 2012 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Red Hat Author(s): Behdad Esfahbod
 * Google Author(s): Behdad Esfahbod
 */

#ifndef HB_OPEN_TYPE_PRIVATE_HH
#define HB_OPEN_TYPE_PRIVATE_HH

#include "hb-private.hh"

#include "hb-blob.h"


namespace OT {


/*
 * Casts
 */

/* Cast to struct T, reference to reference */
template<typename Type, typename TObject>
inline const Type& CastR(const TObject &X)
{ return reinterpret_cast<const Type&> (X); }
template<typename Type, typename TObject>
inline Type& CastR(TObject &X)
{ return reinterpret_cast<Type&> (X); }

/* Cast to struct T, pointer to pointer */
template<typename Type, typename TObject>
inline const Type* CastP(const TObject *X)
{ return reinterpret_cast<const Type*> (X); }
template<typename Type, typename TObject>
inline Type* CastP(TObject *X)
{ return reinterpret_cast<Type*> (X); }
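
/* Illustrative example (an editorial addition, not from the original header):
 * reinterpreting already bounds-checked table bytes as an OpenType type,
 * where 'table_bytes' is a hypothetical pointer into a font blob:
 *
 *   const USHORT &count  = CastR<USHORT> (*table_bytes);  // byte reference -> struct reference
 *   const USHORT *countp = CastP<USHORT> (table_bytes);   // byte pointer   -> struct pointer
 */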


/* StructAtOffset<T>(P,Ofs) returns the struct T& that is placed at memory
 * location pointed to by P plus Ofs bytes. */
template<typename Type>
inline const Type& StructAtOffset(const void *P, unsigned int offset)
{ return * reinterpret_cast<const Type*> ((const char *) P + offset); }
template<typename Type>
inline Type& StructAtOffset(void *P, unsigned int offset)
{ return * reinterpret_cast<Type*> ((char *) P + offset); }

/* StructAfter<T>(X) returns the struct T& that is placed after X.
 * Works with X of variable size also.  X must implement get_size() */
template<typename Type, typename TObject>
inline const Type& StructAfter(const TObject &X)
{ return StructAtOffset<Type>(&X, X.get_size()); }
template<typename Type, typename TObject>
inline Type& StructAfter(TObject &X)
{ return StructAtOffset<Type>(&X, X.get_size()); }
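
/* Illustrative sketch (an editorial addition, not from the original header):
 * given a variable-sized ArrayOf<USHORT> 'arr' that is immediately followed
 * by another USHORT inside the same table, the trailing value could be
 * reached as
 *
 *   const USHORT &trailer = StructAfter<USHORT> (arr);
 *
 * which relies on arr.get_size () reporting the array's true byte length. */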


/*
 * Size checking
 */

/* Check _assertion in a method environment */
#define _DEFINE_INSTANCE_ASSERTION1(_line, _assertion) \
  inline void _instance_assertion_on_line_##_line (void) const \
  { \
    ASSERT_STATIC (_assertion); \
    ASSERT_INSTANCE_POD (*this); /* Make sure it's POD. */ \
  }
# define _DEFINE_INSTANCE_ASSERTION0(_line, _assertion) _DEFINE_INSTANCE_ASSERTION1 (_line, _assertion)
# define DEFINE_INSTANCE_ASSERTION(_assertion) _DEFINE_INSTANCE_ASSERTION0 (__LINE__, _assertion)

/* Check that _code compiles in a method environment */
#define _DEFINE_COMPILES_ASSERTION1(_line, _code) \
  inline void _compiles_assertion_on_line_##_line (void) const \
  { _code; }
# define _DEFINE_COMPILES_ASSERTION0(_line, _code) _DEFINE_COMPILES_ASSERTION1 (_line, _code)
# define DEFINE_COMPILES_ASSERTION(_code) _DEFINE_COMPILES_ASSERTION0 (__LINE__, _code)


#define DEFINE_SIZE_STATIC(size) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size)); \
  static const unsigned int static_size = (size); \
  static const unsigned int min_size = (size)

/* Size signifying variable-sized array */
#define VAR 1

#define DEFINE_SIZE_UNION(size, _member) \
  DEFINE_INSTANCE_ASSERTION (this->u._member.static_size == (size)); \
  static const unsigned int min_size = (size)

#define DEFINE_SIZE_MIN(size) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) >= (size)); \
  static const unsigned int min_size = (size)

#define DEFINE_SIZE_ARRAY(size, array) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size) + sizeof (array[0])); \
  DEFINE_COMPILES_ASSERTION ((void) array[0].static_size) \
  static const unsigned int min_size = (size)

#define DEFINE_SIZE_ARRAY2(size, array1, array2) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size) + sizeof (this->array1[0]) + sizeof (this->array2[0])); \
  DEFINE_COMPILES_ASSERTION ((void) array1[0].static_size; (void) array2[0].static_size) \
  static const unsigned int min_size = (size)
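
/* Illustrative sketch (an editorial addition, not from the original header):
 * a fixed-layout struct pairs its members with DEFINE_SIZE_STATIC, while a
 * struct ending in a variable-length array uses DEFINE_SIZE_ARRAY, e.g. the
 * hypothetical
 *
 *   struct ExampleRecord {
 *     USHORT format;
 *     USHORT values[VAR];
 *     DEFINE_SIZE_ARRAY (2, values);
 *   };
 *
 * which asserts sizeof (ExampleRecord) == 2 + sizeof (USHORT) and sets
 * min_size to 2. */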




/*
 * Null objects
 */

/* Global nul-content Null pool.  Enlarge as necessary. */
/* TODO This really should be an extern HB_INTERNAL and defined somewhere... */
static const void *_NullPool[64 / sizeof (void *)];

/* Generic nul-content Null objects. */
template <typename Type>
static inline const Type& Null (void) {
  ASSERT_STATIC (Type::min_size <= sizeof (_NullPool));
  return *CastP<Type> (_NullPool);
}

/* Specialization for arbitrary-content arbitrary-sized Null objects. */
#define DEFINE_NULL_DATA(Type, data) \
static const char _Null##Type[Type::min_size + 1] = data; /* +1 is for nul-termination in data */ \
template <> \
inline const Type& Null<Type> (void) { \
  return *CastP<Type> (_Null##Type); \
} /* The following line really exists so that we end in a place needing a semicolon */ \
ASSERT_STATIC (Type::min_size + 1 <= sizeof (_Null##Type))

/* Accessor macro. */
#define Null(Type) Null<Type>()
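
/* Illustrative sketch (an editorial addition, not from the original header):
 * Null() supplies a safe all-zero instance to return when data is missing or
 * out of bounds, e.g. for hypothetical variables
 *
 *   const GlyphID &g = in_range ? real_glyph : Null(GlyphID);
 *
 * while DEFINE_NULL_DATA (as done for Tag and Index below) supplies non-zero
 * null content where all-zeros is not a sensible default. */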


/*
 * Sanitize
 */

#ifndef HB_DEBUG_SANITIZE
#define HB_DEBUG_SANITIZE (HB_DEBUG+0)
#endif


#define TRACE_SANITIZE() \
        hb_auto_trace_t<HB_DEBUG_SANITIZE> trace (&c->debug_depth, "SANITIZE", this, HB_FUNC, "");


struct hb_sanitize_context_t
{
  inline void init (hb_blob_t *b)
  {
    this->blob = hb_blob_reference (b);
    this->writable = false;
  }

  inline void start_processing (void)
  {
    this->start = hb_blob_get_data (this->blob, NULL);
    this->end = this->start + hb_blob_get_length (this->blob);
    this->edit_count = 0;
    this->debug_depth = 0;

    DEBUG_MSG_LEVEL (SANITIZE, this->blob, 0, +1,
                     "start [%p..%p] (%lu bytes)",
                     this->start, this->end,
                     (unsigned long) (this->end - this->start));
  }

  inline void end_processing (void)
  {
    DEBUG_MSG_LEVEL (SANITIZE, this->blob, 0, -1,
                     "end [%p..%p] %u edit requests",
                     this->start, this->end, this->edit_count);

    hb_blob_destroy (this->blob);
    this->blob = NULL;
    this->start = this->end = NULL;
  }

  inline bool check_range (const void *base, unsigned int len) const
  {
    const char *p = (const char *) base;

    hb_auto_trace_t<HB_DEBUG_SANITIZE> trace (&this->debug_depth, "SANITIZE", this->blob, NULL,
                                              "check_range [%p..%p] (%d bytes) in [%p..%p]",
                                              p, p + len, len,
                                              this->start, this->end);

    return TRACE_RETURN (likely (this->start <= p && p <= this->end && (unsigned int) (this->end - p) >= len));
  }

  inline bool check_array (const void *base, unsigned int record_size, unsigned int len) const
  {
    const char *p = (const char *) base;
    bool overflows = _hb_unsigned_int_mul_overflows (len, record_size);

    hb_auto_trace_t<HB_DEBUG_SANITIZE> trace (&this->debug_depth, "SANITIZE", this->blob, NULL,
                                              "check_array [%p..%p] (%d*%d=%ld bytes) in [%p..%p]",
                                              p, p + (record_size * len), record_size, len, (unsigned long) record_size * len,
                                              this->start, this->end);

    return TRACE_RETURN (likely (!overflows && this->check_range (base, record_size * len)));
  }

  template <typename Type>
  inline bool check_struct (const Type *obj) const
  {
    return likely (this->check_range (obj, obj->min_size));
  }

  inline bool may_edit (const void *base HB_UNUSED, unsigned int len HB_UNUSED)
  {
    const char *p = (const char *) base;
    this->edit_count++;

    hb_auto_trace_t<HB_DEBUG_SANITIZE> trace (&this->debug_depth, "SANITIZE", this->blob, NULL,
                                              "may_edit(%u) [%p..%p] (%d bytes) in [%p..%p] -> %s",
                                              this->edit_count,
                                              p, p + len, len,
                                              this->start, this->end,
                                              this->writable ? "granted" : "denied");

    return TRACE_RETURN (this->writable);
  }

  mutable unsigned int debug_depth;
  const char *start, *end;
  bool writable;
  unsigned int edit_count;
  hb_blob_t *blob;
};
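
/* Illustrative sketch (an editorial addition, not from the original header):
 * a typical struct's sanitize() method bound-checks itself and anything it
 * references, e.g.
 *
 *   inline bool sanitize (hb_sanitize_context_t *c) {
 *     TRACE_SANITIZE ();
 *     return TRACE_RETURN (c->check_struct (this) && offsetMember.sanitize (c, this));
 *   }
 *
 * where 'offsetMember' stands for whatever OffsetTo<> member the struct
 * declares. */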



/* Template to sanitize an object. */
template <typename Type>
struct Sanitizer
{
  static hb_blob_t *sanitize (hb_blob_t *blob) {
    hb_sanitize_context_t c[1] = {{0}};
    bool sane;

    /* TODO is_sane() stuff */

    c->init (blob);

  retry:
    DEBUG_MSG_FUNC (SANITIZE, blob, "start");

    c->start_processing ();

    if (unlikely (!c->start)) {
      c->end_processing ();
      return blob;
    }

    Type *t = CastP<Type> (const_cast<char *> (c->start));

    sane = t->sanitize (c);
    if (sane) {
      if (c->edit_count) {
        DEBUG_MSG_FUNC (SANITIZE, blob, "passed first round with %d edits; going for second round", c->edit_count);

        /* sanitize again to ensure no toe-stepping */
        c->edit_count = 0;
        sane = t->sanitize (c);
        if (c->edit_count) {
          DEBUG_MSG_FUNC (SANITIZE, blob, "requested %d edits in second round; FAILING", c->edit_count);
          sane = false;
        }
      }
    } else {
      unsigned int edit_count = c->edit_count;
      if (edit_count && !c->writable) {
        c->start = hb_blob_get_data_writable (blob, NULL);
        c->end = c->start + hb_blob_get_length (blob);

        if (c->start) {
          c->writable = true;
          /* ok, we made it writable by relocating.  try again */
          DEBUG_MSG_FUNC (SANITIZE, blob, "retry");
          goto retry;
        }
      }
    }

    c->end_processing ();

    DEBUG_MSG_FUNC (SANITIZE, blob, sane ? "PASSED" : "FAILED");
    if (sane)
      return blob;
    else {
      hb_blob_destroy (blob);
      return hb_blob_get_empty ();
    }
  }

  static const Type* lock_instance (hb_blob_t *blob) {
    hb_blob_make_immutable (blob);
    const char *base = hb_blob_get_data (blob, NULL);
    return unlikely (!base) ? &Null(Type) : CastP<Type> (base);
  }
};
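
/* Illustrative sketch (an editorial addition, not from the original header):
 * sanitizing a freshly loaded table blob before use, with 'GSUB' standing
 * for one of the OpenType table structs defined elsewhere in HarfBuzz:
 *
 *   hb_blob_t *sane_blob = Sanitizer<GSUB>::sanitize (raw_blob);
 *   const GSUB *gsub = Sanitizer<GSUB>::lock_instance (sane_blob);
 *
 * A failed sanitize hands back the empty blob, so callers never end up
 * dereferencing unchecked data. */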




/*
 * Serialize
 */

#ifndef HB_DEBUG_SERIALIZE
#define HB_DEBUG_SERIALIZE (HB_DEBUG+0)
#endif


#define TRACE_SERIALIZE() \
        hb_auto_trace_t<HB_DEBUG_SERIALIZE> trace (&c->debug_depth, "SERIALIZE", c, HB_FUNC, "");


struct hb_serialize_context_t
{
  inline void init (void *start, unsigned int size)
  {
    this->start = (char *) start;
    this->end = this->start + size;
  }

  inline void start_processing (void)
  {
    this->ran_out_of_room = false;
    this->head = this->start;
    this->debug_depth = 0;

    DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, +1,
                     "start [%p..%p] (%lu bytes)",
                     this->start, this->end,
                     (unsigned long) (this->end - this->start));
  }

  inline void end_processing (void)
  {
    DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, -1,
                     "end [%p..%p] %s",
                     this->start, this->end,
                     this->ran_out_of_room ? "RAN OUT OF ROOM" : "did not run out of room");

    this->start = this->end = this->head = NULL;
  }

  template <typename Type>
  inline Type *allocate_size (unsigned int size)
  {
    if (unlikely (this->ran_out_of_room || this->end - this->head < size)) {
      this->ran_out_of_room = true;
      return NULL;
    }
    memset (this->head, 0, size);
    char *ret = this->head;
    this->head += size;
    return reinterpret_cast<Type *> (ret);
  }

  template <typename Type>
  inline Type *allocate_min (void)
  {
    return this->allocate_size<Type> (Type::min_size);
  }

  template <typename Type>
  inline Type *embed (const Type &obj)
  {
    unsigned int size = obj.get_size ();
    Type *ret = this->allocate_size<Type> (size);
    if (unlikely (!ret)) return NULL;
    memcpy (ret, &obj, size);
    return ret;
  }

  template <typename Type>
  inline Type *extend_min (Type &obj)
  {
    unsigned int size = obj.min_size;
    assert (this->start < (char *) &obj && (char *) &obj <= this->head && (char *) &obj + size >= this->head);
    this->allocate_size<Type> (((char *) &obj) + size - this->head);
    return reinterpret_cast<Type *> (&obj);
  }

  template <typename Type>
  inline Type *extend (Type &obj)
  {
    unsigned int size = obj.get_size ();
    assert (this->start < (char *) &obj && (char *) &obj <= this->head && (char *) &obj + size >= this->head);
    this->allocate_size<Type> (((char *) &obj) + size - this->head);
    return reinterpret_cast<Type *> (&obj);
  }

  inline void truncate (void *head)
  {
    assert (this->start < head && head <= this->head);
    this->head = (char *) head;
  }

  unsigned int debug_depth;
  char *start, *end, *head;
  bool ran_out_of_room;
};
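
/* Illustrative sketch (an editorial addition, not from the original header):
 * driving the serializer over a caller-provided buffer, with 'buf',
 * 'buf_len', and 'SomeTable' as hypothetical names:
 *
 *   hb_serialize_context_t c;
 *   c.init (buf, buf_len);
 *   c.start_processing ();
 *   SomeTable *t = c.allocate_min<SomeTable> ();
 *   if (t) { ... fill in and extend t ... }
 *   c.end_processing ();
 *
 * allocate_size() zero-fills what it hands out, so a partially written
 * object is still well-defined data. */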




/*
 *
 * The OpenType Font File: Data Types
 */


/* "The following data types are used in the OpenType font file.
 *  All OpenType fonts use Motorola-style byte ordering (Big Endian):" */

/*
 * Int types
 */


template <typename Type, int Bytes> struct BEInt;

template <typename Type>
struct BEInt<Type, 2>
{
  public:
  inline void set (Type i) { hb_be_uint16_put (v,i); }
  inline operator Type (void) const { return hb_be_uint16_get (v); }
  inline bool operator == (const BEInt<Type, 2>& o) const { return hb_be_uint16_eq (v, o.v); }
  inline bool operator != (const BEInt<Type, 2>& o) const { return !(*this == o); }
  private: uint8_t v[2];
};
template <typename Type>
struct BEInt<Type, 4>
{
  public:
  inline void set (Type i) { hb_be_uint32_put (v,i); }
  inline operator Type (void) const { return hb_be_uint32_get (v); }
  inline bool operator == (const BEInt<Type, 4>& o) const { return hb_be_uint32_eq (v, o.v); }
  inline bool operator != (const BEInt<Type, 4>& o) const { return !(*this == o); }
  private: uint8_t v[4];
};

/* Integer types in big-endian order with no alignment requirement */
template <typename Type>
struct IntType
{
  inline void set (Type i) { v.set (i); }
  inline operator Type(void) const { return v; }
  inline bool operator == (const IntType<Type> &o) const { return v == o.v; }
  inline bool operator != (const IntType<Type> &o) const { return v != o.v; }
  inline int cmp (Type a) const { Type b = v; return a < b ? -1 : a == b ? 0 : +1; }
  inline bool sanitize (hb_sanitize_context_t *c) {
    TRACE_SANITIZE ();
    return TRACE_RETURN (likely (c->check_struct (this)));
  }
  protected:
  BEInt<Type, sizeof (Type)> v;
  public:
  DEFINE_SIZE_STATIC (sizeof (Type));
};

typedef IntType<uint16_t> USHORT;  /* 16-bit unsigned integer. */
typedef IntType<int16_t>  SHORT;   /* 16-bit signed integer. */
typedef IntType<uint32_t> ULONG;   /* 32-bit unsigned integer. */
typedef IntType<int32_t>  LONG;    /* 32-bit signed integer. */
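
/* Illustrative sketch (an editorial addition, not from the original header):
 * these wrappers store big-endian bytes but expose native values, so
 *
 *   USHORT u;
 *   u.set (0x1234);
 *   unsigned int n = u;   // n == 0x1234 regardless of host endianness
 *
 * stays byte-order-correct without explicit swapping at call sites. */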


/* 16-bit signed integer (SHORT) that describes a quantity in FUnits. */
typedef SHORT FWORD;

/* 16-bit unsigned integer (USHORT) that describes a quantity in FUnits. */
typedef USHORT UFWORD;

/* Date represented in number of seconds since 12:00 midnight, January 1,
 * 1904. The value is represented as a signed 64-bit integer. */
struct LONGDATETIME
{
  inline bool sanitize (hb_sanitize_context_t *c) {
    TRACE_SANITIZE ();
    return TRACE_RETURN (likely (c->check_struct (this)));
  }
  private:
  LONG major;
  ULONG minor;
  public:
  DEFINE_SIZE_STATIC (8);
};

/* Array of four uint8s (length = 32 bits) used to identify a script, language
 * system, feature, or baseline */
struct Tag : ULONG
{
  /* What the char* converters return is NOT nul-terminated.  Print using "%.4s" */
  inline operator const char* (void) const { return reinterpret_cast<const char *> (&this->v); }
  inline operator char* (void) { return reinterpret_cast<char *> (&this->v); }
  public:
  DEFINE_SIZE_STATIC (4);
};
DEFINE_NULL_DATA (Tag, "    ");

/* Glyph index number, same as uint16 (length = 16 bits) */
typedef USHORT GlyphID;

/* Script/language-system/feature index */
struct Index : USHORT {
  static const unsigned int NOT_FOUND_INDEX = 0xFFFF;
};
DEFINE_NULL_DATA (Index, "\xff\xff");

/* Offset to a table, same as uint16 (length = 16 bits), Null offset = 0x0000 */
typedef USHORT Offset;

/* LongOffset to a table, same as uint32 (length = 32 bits), Null offset = 0x00000000 */
typedef ULONG LongOffset;


/* CheckSum */
struct CheckSum : ULONG
{
  static uint32_t CalcTableChecksum (ULONG *Table, uint32_t Length)
  {
    uint32_t Sum = 0L;
    ULONG *EndPtr = Table+((Length+3) & ~3) / ULONG::static_size;

    while (Table < EndPtr)
      Sum += *Table++;
    return Sum;
  }
  public:
  DEFINE_SIZE_STATIC (4);
};
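
/* Illustrative sketch (an editorial addition, not from the original header):
 * the checksum walks the table as big-endian ULONGs with 32-bit wrap-around,
 * so for hypothetical 'table_data'/'table_length' one would compute
 *
 *   uint32_t sum = CheckSum::CalcTableChecksum ((ULONG *) table_data, table_length);
 *
 * where the length is rounded up internally to a multiple of four bytes. */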



/*
 * Version Numbers
 */

struct FixedVersion
{
  inline uint32_t to_int (void) const { return (major << 16) + minor; }

  inline bool sanitize (hb_sanitize_context_t *c) {
    TRACE_SANITIZE ();
    return TRACE_RETURN (c->check_struct (this));
  }

  USHORT major;
  USHORT minor;
  public:
  DEFINE_SIZE_STATIC (4);
};



/*
 * Template subclasses of Offset and LongOffset that do the dereferencing.
 * Use: (base+offset)
 */

template <typename OffsetType, typename Type>
struct GenericOffsetTo : OffsetType
{
  inline const Type& operator () (const void *base) const
  {
    unsigned int offset = *this;
    if (unlikely (!offset)) return Null(Type);
    return StructAtOffset<Type> (base, offset);
  }
  inline Type& operator () (void *base)
  {
    unsigned int offset = *this;
    return StructAtOffset<Type> (base, offset);
  }

  inline Type& serialize (hb_serialize_context_t *c, void *base)
  {
    Type *t = (Type *) c->head;
    this->set ((char *) t - (char *) base); /* TODO(serialize) Overflow? */
    return *t;
  }

  inline bool sanitize (hb_sanitize_context_t *c, void *base) {
    TRACE_SANITIZE ();
    if (unlikely (!c->check_struct (this))) return TRACE_RETURN (false);
    unsigned int offset = *this;
    if (unlikely (!offset)) return TRACE_RETURN (true);
    Type &obj = StructAtOffset<Type> (base, offset);
    return TRACE_RETURN (likely (obj.sanitize (c)) || neuter (c));
  }
  template <typename T>
  inline bool sanitize (hb_sanitize_context_t *c, void *base, T user_data) {
    TRACE_SANITIZE ();
    if (unlikely (!c->check_struct (this))) return TRACE_RETURN (false);
    unsigned int offset = *this;
    if (unlikely (!offset)) return TRACE_RETURN (true);
    Type &obj = StructAtOffset<Type> (base, offset);
    return TRACE_RETURN (likely (obj.sanitize (c, user_data)) || neuter (c));
  }

  private:
  /* Set the offset to Null */
  inline bool neuter (hb_sanitize_context_t *c) {
    if (c->may_edit (this, this->static_size)) {
      this->set (0); /* 0 is Null offset */
      return true;
    }
    return false;
  }
};
template <typename Base, typename OffsetType, typename Type>
inline const Type& operator + (const Base &base, const GenericOffsetTo<OffsetType, Type> &offset) { return offset (base); }
template <typename Base, typename OffsetType, typename Type>
inline Type& operator + (Base &base, GenericOffsetTo<OffsetType, Type> &offset) { return offset (base); }

template <typename Type>
struct OffsetTo : GenericOffsetTo<Offset, Type> {};

template <typename Type>
struct LongOffsetTo : GenericOffsetTo<LongOffset, Type> {};
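
/* Illustrative sketch (an editorial addition, not from the original header):
 * an offset member dereferences relative to whatever base the caller
 * supplies, most often the enclosing struct itself, e.g. in a hypothetical
 * parent:
 *
 *   struct ExampleParent {
 *     OffsetTo<Coverage> coverage;
 *     inline const Coverage &get_coverage (void) const { return this+coverage; }
 *   };
 *
 * A zero offset yields Null(Coverage), so lookups degrade gracefully. */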



/*
 * Array Types
 */

template <typename LenType, typename Type>
struct GenericArrayOf
{
  const Type *sub_array (unsigned int start_offset, unsigned int *pcount /* IN/OUT */) const
  {
    unsigned int count = len;
    if (unlikely (start_offset > count))
      count = 0;
    else
      count -= start_offset;
    count = MIN (count, *pcount);
    *pcount = count;
    return array + start_offset;
  }

  inline const Type& operator [] (unsigned int i) const
  {
    if (unlikely (i >= len)) return Null(Type);
    return array[i];
  }
  inline Type& operator [] (unsigned int i)
  {
    return array[i];
  }
  inline unsigned int get_size (void) const
  { return len.static_size + len * Type::static_size; }

  inline bool serialize (hb_serialize_context_t *c,
                         unsigned int items_len)
  {
    TRACE_SERIALIZE ();
    if (unlikely (!c->extend_min (*this))) return TRACE_RETURN (false);
    len.set (items_len); /* TODO(serialize) Overflow? */
    if (unlikely (!c->extend (*this))) return TRACE_RETURN (false);
    return TRACE_RETURN (true);
  }

  inline bool serialize (hb_serialize_context_t *c,
                         const Type *items,
                         unsigned int items_len)
  {
    TRACE_SERIALIZE ();
    if (unlikely (!c->extend_min (*this))) return TRACE_RETURN (false);
    len.set (items_len); /* TODO(serialize) Overflow? */
    if (unlikely (!c->extend (*this))) return TRACE_RETURN (false);
    unsigned int count = items_len;
    for (unsigned int i = 0; i < count; i++)
      array[i].set (items[i]);
    return TRACE_RETURN (true);
  }

  inline bool sanitize (hb_sanitize_context_t *c) {
    TRACE_SANITIZE ();
    if (unlikely (!sanitize_shallow (c))) return TRACE_RETURN (false);

    /* Note: for structs that do not reference other structs,
     * we do not need to call their sanitize() as we already did
     * a bound check on the aggregate array size.  We just include
     * a small unreachable expression to make sure the structs
     * pointed to do have a simple sanitize(), i.e. they do not
     * reference other structs via offsets.
     */
    (void) (false && array[0].sanitize (c));

    return TRACE_RETURN (true);
  }
  inline bool sanitize (hb_sanitize_context_t *c, void *base) {
    TRACE_SANITIZE ();
    if (unlikely (!sanitize_shallow (c))) return TRACE_RETURN (false);
    unsigned int count = len;
    for (unsigned int i = 0; i < count; i++)
      if (unlikely (!array[i].sanitize (c, base)))
        return TRACE_RETURN (false);
    return TRACE_RETURN (true);
  }
  template <typename T>
  inline bool sanitize (hb_sanitize_context_t *c, void *base, T user_data) {
    TRACE_SANITIZE ();
    if (unlikely (!sanitize_shallow (c))) return TRACE_RETURN (false);
    unsigned int count = len;
    for (unsigned int i = 0; i < count; i++)
      if (unlikely (!array[i].sanitize (c, base, user_data)))
        return TRACE_RETURN (false);
    return TRACE_RETURN (true);
  }

  private:
  inline bool sanitize_shallow (hb_sanitize_context_t *c) {
    TRACE_SANITIZE ();
    return TRACE_RETURN (c->check_struct (this) && c->check_array (this, Type::static_size, len));
  }

  public:
  LenType len;
  Type array[VAR];
  public:
  DEFINE_SIZE_ARRAY (sizeof (LenType), array);
};
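
/* Illustrative sketch (an editorial addition, not from the original header):
 * a member declared as ArrayOf<SomeRecord> 'records' (hypothetical names)
 * would typically be walked as
 *
 *   unsigned int count = records.len;
 *   for (unsigned int i = 0; i < count; i++)
 *     process (records[i]);   // out-of-range reads return Null(SomeRecord)
 *
 * and get_size () reports the true byte length, which is what makes
 * StructAfter<> chaining past the array possible. */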

/* An array with a USHORT number of elements. */
template <typename Type>
struct ArrayOf : GenericArrayOf<USHORT, Type> {};

/* An array with a ULONG number of elements. */
template <typename Type>
struct LongArrayOf : GenericArrayOf<ULONG, Type> {};

/* Array of Offset's */
template <typename Type>
struct OffsetArrayOf : ArrayOf<OffsetTo<Type> > {};

/* Array of LongOffset's */
template <typename Type>
struct LongOffsetArrayOf : ArrayOf<LongOffsetTo<Type> > {};

/* LongArray of LongOffset's */
template <typename Type>
struct LongOffsetLongArrayOf : LongArrayOf<LongOffsetTo<Type> > {};

/* Array of offsets relative to the beginning of the array itself. */
template <typename Type>
struct OffsetListOf : OffsetArrayOf<Type>
{
  inline const Type& operator [] (unsigned int i) const
  {
    if (unlikely (i >= this->len)) return Null(Type);
    return this+this->array[i];
  }

  inline bool sanitize (hb_sanitize_context_t *c) {
    TRACE_SANITIZE ();
    return TRACE_RETURN (OffsetArrayOf<Type>::sanitize (c, this));
  }
  template <typename T>
  inline bool sanitize (hb_sanitize_context_t *c, T user_data) {
    TRACE_SANITIZE ();
    return TRACE_RETURN (OffsetArrayOf<Type>::sanitize (c, this, user_data));
  }
};


/* An array with a USHORT number of elements,
 * starting at the second element. */
template <typename Type>
struct HeadlessArrayOf
{
  inline const Type& operator [] (unsigned int i) const
  {
    if (unlikely (i >= len || !i)) return Null(Type);
    return array[i-1];
  }
  inline unsigned int get_size (void) const
  { return len.static_size + (len ? len - 1 : 0) * Type::static_size; }

  inline bool sanitize_shallow (hb_sanitize_context_t *c) {
    return c->check_struct (this)
        && c->check_array (this, Type::static_size, len);
  }

  inline bool sanitize (hb_sanitize_context_t *c) {
    TRACE_SANITIZE ();
    if (unlikely (!sanitize_shallow (c))) return TRACE_RETURN (false);

    /* Note: for structs that do not reference other structs,
     * we do not need to call their sanitize() as we already did
     * a bound check on the aggregate array size.  We just include
     * a small unreachable expression to make sure the structs
     * pointed to do have a simple sanitize(), i.e. they do not
     * reference other structs via offsets.
     */
    (void) (false && array[0].sanitize (c));

    return TRACE_RETURN (true);
  }

  USHORT len;
  Type array[VAR];
  public:
  DEFINE_SIZE_ARRAY (sizeof (USHORT), array);
};


/* An array with sorted elements.  Supports binary searching. */
template <typename Type>
struct SortedArrayOf : ArrayOf<Type> {

  template <typename SearchType>
  inline int search (const SearchType &x) const {
    unsigned int count = this->len;
    /* Linear search is *much* faster for small counts. */
    if (likely (count < 32)) {
      for (unsigned int i = 0; i < count; i++)
        if (this->array[i].cmp (x) == 0)
          return i;
      return -1;
    } else {
      struct Cmp {
        static int cmp (const SearchType *a, const Type *b) { return b->cmp (*a); }
      };
      const Type *p = (const Type *) bsearch (&x, this->array, this->len, sizeof (this->array[0]), (hb_compare_func_t) Cmp::cmp);
      return p ? p - this->array : -1;
    }
  }
};
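
/* Illustrative sketch (an editorial addition, not from the original header):
 * given a hypothetical SortedArrayOf<Tag> member 'tags' kept in ascending
 * order, a lookup is simply
 *
 *   int i = tags.search (HB_TAG ('l','a','t','n'));
 *
 * which returns the element index, or -1 when the tag is absent. */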


} // namespace OT


#endif /* HB_OPEN_TYPE_PRIVATE_HH */