Commit 5c3112ae, authored by Behdad Esfahbod

s/hb_prealloced_array_t/hb_vector_t/g

Part of https://github.com/harfbuzz/harfbuzz/issues/1017
Parent 4a01eb12
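The renamed type is HarfBuzz's internal dynamically sized array with a fixed amount of inline ("pre-allocated") storage; the second template argument visible in the hunks below (for example hb_vector_t<page_map_t, 8>) is that inline capacity. As a rough illustration of the idea only, here is a minimal self-contained small-vector sketch for POD element types. The type and member names (tiny_vector_t, static_array, and so on) are hypothetical; this is not HarfBuzz's actual implementation.

#include <cstdlib>
#include <cstring>

/* Hypothetical sketch of an inline-storage ("pre-allocated") vector, the
 * concept behind hb_vector_t<Type, StaticSize>.  POD element types only. */
template <typename Type, unsigned int StaticSize = 16>
struct tiny_vector_t
{
  unsigned int len;                /* items currently in use */
  unsigned int allocated;          /* current capacity */
  Type *array;                     /* points at static_array until the first heap growth */
  Type static_array[StaticSize];   /* inline storage, no malloc for small sizes */

  void init () { len = 0; allocated = StaticSize; array = static_array; }

  Type *push (const Type &v)
  {
    if (len == allocated)
    {
      unsigned int new_allocated = allocated * 2;
      Type *new_array = (Type *) malloc (new_allocated * sizeof (Type));
      if (!new_array) return nullptr;               /* out of memory: item not added */
      memcpy (new_array, array, len * sizeof (Type));
      if (array != static_array) free (array);
      array = new_array;
      allocated = new_allocated;
    }
    array[len] = v;
    return &array[len++];
  }

  Type &operator [] (unsigned int i) { return array[i]; }

  void fini () { if (array != static_array) free (array); init (); }
};

Typical use of such a type would be: tiny_vector_t<unsigned int, 8> v; v.init (); v.push (1u); v.fini ();. Keeping the first StaticSize items inside the object avoids heap traffic in the common small case, which is why members such as hb_set_t::page_map below declare a per-type static size.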
@@ -294,7 +294,7 @@ struct CmapSubtableLongSegmented
}
inline bool serialize (hb_serialize_context_t *c,
-hb_prealloced_array_t<CmapSubtableLongGroup> &group_data)
+hb_vector_t<CmapSubtableLongGroup> &group_data)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false);
@@ -548,7 +548,7 @@ struct cmap
}
inline bool populate_groups (hb_subset_plan_t *plan,
-hb_prealloced_array_t<CmapSubtableLongGroup> *groups) const
+hb_vector_t<CmapSubtableLongGroup> *groups) const
{
CmapSubtableLongGroup *group = nullptr;
for (unsigned int i = 0; i < plan->codepoints.len; i++) {
@@ -582,7 +582,7 @@ struct cmap
return true;
}
-inline bool _subset (hb_prealloced_array_t<CmapSubtableLongGroup> &groups,
+inline bool _subset (hb_vector_t<CmapSubtableLongGroup> &groups,
size_t dest_sz,
void *dest) const
{
......
@@ -94,7 +94,7 @@ struct hmtxvmtx
/* All the trailing glyphs with the same advance can use one LongMetric
* and just keep LSB */
-hb_prealloced_array_t<hb_codepoint_t> &gids = plan->gids_to_retain_sorted;
+hb_vector_t<hb_codepoint_t> &gids = plan->gids_to_retain_sorted;
unsigned int num_advances = gids.len;
unsigned int last_advance = _mtx.get_advance (gids[num_advances - 1]);
while (num_advances > 1
......
@@ -147,9 +147,9 @@ struct hb_ot_map_t
hb_mask_t global_mask;
-hb_prealloced_array_t<feature_map_t, 8> features;
-hb_prealloced_array_t<lookup_map_t, 32> lookups[2]; /* GSUB/GPOS */
-hb_prealloced_array_t<stage_map_t, 4> stages[2]; /* GSUB/GPOS */
+hb_vector_t<feature_map_t, 8> features;
+hb_vector_t<lookup_map_t, 32> lookups[2]; /* GSUB/GPOS */
+hb_vector_t<stage_map_t, 4> stages[2]; /* GSUB/GPOS */
};
enum hb_ot_map_feature_flags_t {
@@ -241,8 +241,8 @@ struct hb_ot_map_builder_t
private:
unsigned int current_stage[2]; /* GSUB/GPOS */
-hb_prealloced_array_t<feature_info_t, 32> feature_infos;
-hb_prealloced_array_t<stage_info_t, 8> stages[2]; /* GSUB/GPOS */
+hb_vector_t<feature_info_t, 32> feature_infos;
+hb_vector_t<stage_info_t, 8> stages[2]; /* GSUB/GPOS */
};
......
@@ -74,7 +74,7 @@ struct os2
return result;
}
-inline void _update_unicode_ranges (const hb_prealloced_array_t<hb_codepoint_t> &codepoints,
+inline void _update_unicode_ranges (const hb_vector_t<hb_codepoint_t> &codepoints,
HBUINT32 ulUnicodeRange[4]) const
{
for (unsigned int i = 0; i < 4; i++)
@@ -101,7 +101,7 @@ struct os2
}
}
-static inline void find_min_and_max_codepoint (const hb_prealloced_array_t<hb_codepoint_t> &codepoints,
+static inline void find_min_and_max_codepoint (const hb_vector_t<hb_codepoint_t> &codepoints,
uint16_t *min_cp, /* OUT */
uint16_t *max_cp /* OUT */)
{
......
@@ -258,7 +258,7 @@ struct post
hb_blob_t *blob;
uint32_t version;
const ArrayOf<HBUINT16> *glyphNameIndex;
-hb_prealloced_array_t<uint32_t, 1> index_to_offset;
+hb_vector_t<uint32_t, 1> index_to_offset;
const uint8_t *pool;
mutable uint16_t *gids_sorted_by_name;
};
......
@@ -527,7 +527,7 @@ _hb_ceil_to_4 (unsigned int v)
#define HB_PREALLOCED_ARRAY_INIT {0, 0, nullptr}
template <typename Type, unsigned int StaticSize=16>
-struct hb_prealloced_array_t
+struct hb_vector_t
{
unsigned int len;
unsigned int allocated;
@@ -710,14 +710,14 @@ struct hb_auto_t : Type
void fini (void) {}
};
template <typename Type>
-struct hb_auto_array_t : hb_auto_t <hb_prealloced_array_t <Type> > {};
+struct hb_auto_array_t : hb_auto_t <hb_vector_t <Type> > {};
#define HB_LOCKABLE_SET_INIT {HB_PREALLOCED_ARRAY_INIT}
template <typename item_t, typename lock_t>
struct hb_lockable_set_t
{
-hb_prealloced_array_t <item_t, 1> items;
+hb_vector_t <item_t, 1> items;
inline void init (void) { items.init (); }
......
@@ -190,8 +190,8 @@ struct hb_set_t
ASSERT_POD ();
bool in_error;
mutable unsigned int population;
-hb_prealloced_array_t<page_map_t, 8> page_map;
-hb_prealloced_array_t<page_t, 1> pages;
+hb_vector_t<page_map_t, 8> page_map;
+hb_vector_t<page_t, 1> pages;
inline void init (void)
{
......
@@ -32,12 +32,12 @@
static bool
_calculate_glyf_and_loca_prime_size (const OT::glyf::accelerator_t &glyf,
-hb_prealloced_array_t<hb_codepoint_t> &glyph_ids,
+hb_vector_t<hb_codepoint_t> &glyph_ids,
hb_bool_t drop_hints,
bool *use_short_loca /* OUT */,
unsigned int *glyf_size /* OUT */,
unsigned int *loca_size /* OUT */,
-hb_prealloced_array_t<unsigned int> *instruction_ranges /* OUT */)
+hb_vector_t<unsigned int> *instruction_ranges /* OUT */)
{
unsigned int total = 0;
for (unsigned int i = 0; i < glyph_ids.len; i++)
@@ -159,13 +159,13 @@ _write_glyf_and_loca_prime (hb_subset_plan_t *plan,
const OT::glyf::accelerator_t &glyf,
const char *glyf_data,
bool use_short_loca,
-hb_prealloced_array_t<unsigned int> &instruction_ranges,
+hb_vector_t<unsigned int> &instruction_ranges,
unsigned int glyf_prime_size,
char *glyf_prime_data /* OUT */,
unsigned int loca_prime_size,
char *loca_prime_data /* OUT */)
{
-hb_prealloced_array_t<hb_codepoint_t> &glyph_ids = plan->gids_to_retain_sorted;
+hb_vector_t<hb_codepoint_t> &glyph_ids = plan->gids_to_retain_sorted;
char *glyf_prime_data_next = glyf_prime_data;
bool success = true;
@@ -234,11 +234,11 @@ _hb_subset_glyf_and_loca (const OT::glyf::accelerator_t &glyf,
hb_blob_t **loca_prime /* OUT */)
{
// TODO(grieger): Sanity check allocation size for the new table.
-hb_prealloced_array_t<hb_codepoint_t> &glyphs_to_retain = plan->gids_to_retain_sorted;
+hb_vector_t<hb_codepoint_t> &glyphs_to_retain = plan->gids_to_retain_sorted;
unsigned int glyf_prime_size;
unsigned int loca_prime_size;
-hb_prealloced_array_t<unsigned int> instruction_ranges;
+hb_vector_t<unsigned int> instruction_ranges;
instruction_ranges.init();
if (unlikely (!_calculate_glyf_and_loca_prime_size (glyf,
......
@@ -87,7 +87,7 @@ hb_subset_plan_add_table (hb_subset_plan_t *plan,
static void
_populate_codepoints (hb_set_t *input_codepoints,
-hb_prealloced_array_t<hb_codepoint_t>& plan_codepoints)
+hb_vector_t<hb_codepoint_t>& plan_codepoints)
{
plan_codepoints.alloc (hb_set_get_population (input_codepoints));
hb_codepoint_t cp = -1;
@@ -121,9 +121,9 @@ _add_gid_and_children (const OT::glyf::accelerator_t &glyf,
static void
_populate_gids_to_retain (hb_face_t *face,
-hb_prealloced_array_t<hb_codepoint_t>& codepoints,
-hb_prealloced_array_t<hb_codepoint_t>& old_gids,
-hb_prealloced_array_t<hb_codepoint_t>& old_gids_sorted)
+hb_vector_t<hb_codepoint_t>& codepoints,
+hb_vector_t<hb_codepoint_t>& old_gids,
+hb_vector_t<hb_codepoint_t>& old_gids_sorted)
{
OT::cmap::accelerator_t cmap;
OT::glyf::accelerator_t glyf;
......
@@ -45,12 +45,12 @@ struct hb_subset_plan_t {
// These first two lists provide a mapping from cp -> gid
// As a result it does not list the full set of glyphs to retain.
-hb_prealloced_array_t<hb_codepoint_t> codepoints;
-hb_prealloced_array_t<hb_codepoint_t> gids_to_retain;
+hb_vector_t<hb_codepoint_t> codepoints;
+hb_vector_t<hb_codepoint_t> gids_to_retain;
// This list contains the complete set of glyphs to retain and may contain
// more glyphs then the lists above.
-hb_prealloced_array_t<hb_codepoint_t> gids_to_retain_sorted;
+hb_vector_t<hb_codepoint_t> gids_to_retain_sorted;
// Plan is only good for a specific source/dest so keep them with it
hb_face_t *source;
......
@@ -123,7 +123,7 @@ struct hb_subset_face_data_t
hb_blob_t *blob;
};
-hb_prealloced_array_t<table_entry_t, 32> tables;
+hb_vector_t<table_entry_t, 32> tables;
};
static hb_subset_face_data_t *
......