[lookup] Don't initialize skippy if coverage match fails

Currently:

  - Initializing skippy is very expensive,

  - Our lookup accelerator (using set-digests) can be very ineffective,

As such, we often end up initializing skippy only to then fail the
coverage check.  Reordering the two checks fixes that.
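
To make the cost argument concrete, here is a minimal, self-contained
C++ sketch of the reordering.  The CheapCoverage / ExpensiveSkippy types
are hypothetical stand-ins for illustration, not HarfBuzz's real
Coverage and hb_apply_context_t::skipping_forward_iterator_t:

  #include <cstdio>

  // Rejects most glyphs, as a real Coverage table usually does.
  struct CheapCoverage
  {
    bool covers (unsigned g) const { return g % 16 == 0; }
  };

  // Stand-in for the iterator whose setup we want to avoid paying for.
  struct ExpensiveSkippy
  {
    ExpensiveSkippy () { /* imagine costly per-call setup here */ }
  };

  // Old order: the expensive object is constructed on every call.
  static bool apply_old (unsigned g, const CheapCoverage &cov)
  {
    ExpensiveSkippy skippy;              // paid even when coverage fails
    (void) skippy;
    return cov.covers (g);
  }

  // New order: cheap early-out first; expensive setup only when we may match.
  static bool apply_new (unsigned g, const CheapCoverage &cov)
  {
    if (!cov.covers (g)) return false;   // taken ~15/16 of the time here
    ExpensiveSkippy skippy;              // constructed only on the rare path
    (void) skippy;
    return true;
  }

  int main ()
  {
    CheapCoverage cov;
    unsigned old_setups = 0, new_setups = 0;
    for (unsigned g = 0; g < 1024; g++)
    {
      old_setups++;                      // apply_old always constructs
      if (apply_new (g, cov)) new_setups++;
      (void) apply_old (g, cov);
    }
    printf ("expensive setups: old=%u new=%u\n", old_setups, new_setups); // 1024 vs 64
    return 0;
  }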

Later, when we fix our accelerator to have a truly small false-positive
rate (for example, by using the frozen-sets), we might want to reorder
these checks back, such that we wouldn't calculate the coverage number
if skippy is going to fail.
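
For context on the false-positive remark: the set-digest is essentially
a tiny Bloom filter, so it can claim a lookup may apply to a buffer when
it actually doesn't, which is exactly when we pay for skippy and then
fail coverage.  A hedged sketch with a toy one-word digest (illustrative
only; HarfBuzz's real set-digests are more elaborate):

  #include <cstdint>
  #include <cstdio>

  // Toy digest: add () sets one bit per glyph.  may_have () never misses
  // a real member, but any glyph whose low bits collide is a false positive.
  struct toy_digest_t
  {
    uint32_t bits = 0;
    void add (unsigned g)            { bits |= 1u << (g & 31u); }
    bool may_have (unsigned g) const { return bits & (1u << (g & 31u)); }
  };

  int main ()
  {
    toy_digest_t d;
    d.add (65);                          // only glyph 65 is in the set...
    printf ("%d\n", d.may_have (97));    // ...but 97 & 31 == 65 & 31: prints 1
    return 0;
  }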

This shows a 5% speedup with Roboto already.
Behdad Esfahbod 2015-01-29 11:08:43 +01:00
parent 7788993bc1
commit baa14e1814
1 changed file with 6 additions and 9 deletions

src/hb-ot-layout-gpos-table.hh

@@ -681,12 +681,11 @@ struct PairPosFormat1
   {
     TRACE_APPLY (this);
     hb_buffer_t *buffer = c->buffer;
-    hb_apply_context_t::skipping_forward_iterator_t skippy_iter (c, buffer->idx, 1);
-    if (skippy_iter.has_no_chance ()) return TRACE_RETURN (false);
-
     unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
     if (likely (index == NOT_COVERED)) return TRACE_RETURN (false);
 
+    hb_apply_context_t::skipping_forward_iterator_t skippy_iter (c, buffer->idx, 1);
+    if (skippy_iter.has_no_chance ()) return TRACE_RETURN (false);
     if (!skippy_iter.next ()) return TRACE_RETURN (false);
     return TRACE_RETURN ((this+pairSet[index]).apply (c, &valueFormat1, skippy_iter.idx));
   }
@@ -752,12 +751,11 @@ struct PairPosFormat2
   {
     TRACE_APPLY (this);
     hb_buffer_t *buffer = c->buffer;
-    hb_apply_context_t::skipping_forward_iterator_t skippy_iter (c, buffer->idx, 1);
-    if (skippy_iter.has_no_chance ()) return TRACE_RETURN (false);
-
     unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
     if (likely (index == NOT_COVERED)) return TRACE_RETURN (false);
 
+    hb_apply_context_t::skipping_forward_iterator_t skippy_iter (c, buffer->idx, 1);
+    if (skippy_iter.has_no_chance ()) return TRACE_RETURN (false);
     if (!skippy_iter.next ()) return TRACE_RETURN (false);
 
     unsigned int len1 = valueFormat1.get_len ();
@@ -903,12 +901,11 @@ struct CursivePosFormat1
     /* We don't handle mark glyphs here. */
     if (unlikely (_hb_glyph_info_is_mark (&buffer->cur()))) return TRACE_RETURN (false);
 
-    hb_apply_context_t::skipping_forward_iterator_t skippy_iter (c, buffer->idx, 1);
-    if (skippy_iter.has_no_chance ()) return TRACE_RETURN (false);
-
     const EntryExitRecord &this_record = entryExitRecord[(this+coverage).get_coverage (buffer->cur().codepoint)];
     if (!this_record.exitAnchor) return TRACE_RETURN (false);
 
+    hb_apply_context_t::skipping_forward_iterator_t skippy_iter (c, buffer->idx, 1);
+    if (skippy_iter.has_no_chance ()) return TRACE_RETURN (false);
     if (!skippy_iter.next ()) return TRACE_RETURN (false);
     const EntryExitRecord &next_record = entryExitRecord[(this+coverage).get_coverage (buffer->info[skippy_iter.idx].codepoint)];