diff --git a/src/hotspot/share/c1/c1_GraphBuilder.cpp b/src/hotspot/share/c1/c1_GraphBuilder.cpp index 2b7e3125fb4..203b3e93d5d 100644 --- a/src/hotspot/share/c1/c1_GraphBuilder.cpp +++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp @@ -1217,6 +1217,19 @@ void GraphBuilder::_goto(int from_bci, int to_bci) { append(x); } +void GraphBuilder::hash_inline_context_for_if_node(If* if_node) { + if (if_node->scope()->caller() == NULL) return; // keep hash 0 for non-inlined scopes: C2's hash_inline_context_for_branch_prediction() also returns 0 when there is no caller, so both tiers then use the regular BranchData slot + int h = INLINE_HASH_FIRST; + IRScope* s = if_node->scope(); + int caller_bci = bci(); + while (s != NULL) { + s->method()->compute_hash_for_method_and_bci(caller_bci, h); + log_info(compilation)("C1 insert profiling method: %s, hash: %x, caller bci: %d, caller level: %d", s->method()->get_Method()->external_name(), h, caller_bci, s->level()); + caller_bci = s->caller_bci(); + s = s->caller(); + } + if_node->set_fine_profile_inline_hash(h); +} void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) { BlockBegin* tsux = block_at(stream()->get_dest()); @@ -1241,6 +1254,7 @@ void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* sta if (profile_branches()) { // Successors can be rotated by the canonicalizer, check for this case. 
if_node->set_profiled_method(method()); + hash_inline_context_for_if_node(if_node); if_node->set_should_profile(true); if (if_node->tsux() == fsux) { if_node->set_swapped(true); diff --git a/src/hotspot/share/c1/c1_GraphBuilder.hpp b/src/hotspot/share/c1/c1_GraphBuilder.hpp index 420464143bc..b5123e2cebc 100644 --- a/src/hotspot/share/c1/c1_GraphBuilder.hpp +++ b/src/hotspot/share/c1/c1_GraphBuilder.hpp @@ -245,6 +245,9 @@ class GraphBuilder { void increment(); void _goto(int from_bci, int to_bci); void if_node(Value x, If::Condition cond, Value y, ValueStack* stack_before); + + void hash_inline_context_for_if_node(If* if_node); + void if_zero(ValueType* type, If::Condition cond); void if_null(ValueType* type, If::Condition cond); void if_same(ValueType* type, If::Condition cond); diff --git a/src/hotspot/share/c1/c1_IR.cpp b/src/hotspot/share/c1/c1_IR.cpp index fa2c7e71901..0e1d09c4094 100644 --- a/src/hotspot/share/c1/c1_IR.cpp +++ b/src/hotspot/share/c1/c1_IR.cpp @@ -146,6 +146,7 @@ IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMe _wrote_fields = false; _wrote_volatile = false; _start = NULL; + _caller_bci = caller_bci; if (osr_bci != -1) { // selective creation of phi functions is not possibel in osr-methods diff --git a/src/hotspot/share/c1/c1_IR.hpp b/src/hotspot/share/c1/c1_IR.hpp index 86304e062cc..cf90d4c4b81 100644 --- a/src/hotspot/share/c1/c1_IR.hpp +++ b/src/hotspot/share/c1/c1_IR.hpp @@ -138,6 +138,7 @@ class IRScope: public CompilationResourceObj { // hierarchy Compilation* _compilation; // the current compilation IRScope* _caller; // the caller scope, or NULL + int _caller_bci; int _level; // the inlining level ciMethod* _method; // the corresponding method IRScopeList _callees; // the inlined method scopes @@ -163,6 +164,7 @@ class IRScope: public CompilationResourceObj { // accessors Compilation* compilation() const { return _compilation; } IRScope* caller() const { return _caller; } + int caller_bci() const { 
return _caller_bci; } int level() const { return _level; } ciMethod* method() const { return _method; } int max_stack() const; // NOTE: expensive diff --git a/src/hotspot/share/c1/c1_Instruction.hpp b/src/hotspot/share/c1/c1_Instruction.hpp index 118a7f48d4f..9c277925fde 100644 --- a/src/hotspot/share/c1/c1_Instruction.hpp +++ b/src/hotspot/share/c1/c1_Instruction.hpp @@ -1988,6 +1988,7 @@ LEAF(If, BlockEnd) int _profiled_bci; // Canonicalizer may alter bci of If node bool _swapped; // Is the order reversed with respect to the original If in the // bytecode stream? + int _fine_profile_inline_hash; public: // creation // unordered_is_true is valid for float/double compares only @@ -1999,6 +2000,7 @@ LEAF(If, BlockEnd) , _profiled_method(NULL) , _profiled_bci(0) , _swapped(false) + , _fine_profile_inline_hash(0) { ASSERT_VALUES set_flag(UnorderedIsTrueFlag, unordered_is_true); @@ -2023,6 +2025,12 @@ LEAF(If, BlockEnd) int profiled_bci() const { return _profiled_bci; } // set for profiled branches and tiered bool is_swapped() const { return _swapped; } + void set_fine_profile_inline_hash(int hash) { + assert(_fine_profile_inline_hash == 0, "_fine_profile_inline_hash should not been set"); + _fine_profile_inline_hash = hash; + } + int get_fine_profile_inline_hash() { return _fine_profile_inline_hash; } + // manipulation void swap_operands() { Value t = _x; _x = _y; _y = t; diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp index 1043ca45f9e..807cd657676 100644 --- a/src/hotspot/share/c1/c1_LIRGenerator.cpp +++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp @@ -965,11 +965,27 @@ void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) { assert(method != NULL, "method should be set if branch is profiled"); ciMethodData* md = method->method_data_or_null(); assert(md != NULL, "Sanity"); - ciProfileData* data = md->bci_to_data(if_instr->profiled_bci()); - assert(data != NULL, "must have profiling data"); - 
assert(data->is_BranchData(), "need BranchData for two-way branches"); - int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset()); - int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset()); + + int inline_hash = 0; + + if (EnableLevel3FineProfiling) { + inline_hash = if_instr->get_fine_profile_inline_hash(); + } + + int taken_count_offset = 0, not_taken_count_offset = 0; + DataLayout* expanded_data = (inline_hash != 0) ? md->expand_profile_data(if_instr->profiled_bci(), inline_hash) : NULL; + if (expanded_data != NULL) { + // expanded slots are addressed relative to the MethodData* itself + taken_count_offset = md->byte_offset_of_raw_slot(md->constant_encoding(), expanded_data, BranchData::taken_offset()); + not_taken_count_offset = md->byte_offset_of_raw_slot(md->constant_encoding(), expanded_data, BranchData::not_taken_offset()); + } else { // no inline hash, or the expansion table is full: fall back to the regular BranchData + ciProfileData* data = md->bci_to_data(if_instr->profiled_bci()); + assert(data != NULL, "must have profiling data"); + assert(data->is_BranchData(), "need BranchData for two-way branches"); + taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset()); + not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset()); + } + log_info(compilation)("fine profile inline hash: %x, method: %s, meta: %p, taken_offset: %d, not_taken_offset: %d", inline_hash, method->get_Method()->external_name(), md->constant_encoding(), taken_count_offset, not_taken_count_offset); if (if_instr->is_swapped()) { int t = taken_count_offset; taken_count_offset = not_taken_count_offset; diff --git a/src/hotspot/share/ci/ciMethod.hpp b/src/hotspot/share/ci/ciMethod.hpp index 4805204574e..7d29e46b9b5 100644 --- a/src/hotspot/share/ci/ciMethod.hpp +++ b/src/hotspot/share/ci/ciMethod.hpp @@ -33,6 +33,10 @@ #include "prims/methodHandles.hpp" #include "utilities/bitMap.hpp" +#define INLINE_HASH_A 54059 /* a prime */ +#define INLINE_HASH_B 76963 /* another prime */ +#define INLINE_HASH_FIRST 37 /* also 
prime */ + class ciMethodBlocks; class MethodLiveness; class Arena; @@ -172,6 +176,16 @@ class ciMethod : public ciMetadata { return m; } + void compute_hash_for_method_and_bci(int bci, int& h) { + const char* method_full_name = get_Method()->external_name(); + size_t idx = 0; + while (method_full_name[idx]) { + h = (h * INLINE_HASH_A) ^ (method_full_name[idx] * INLINE_HASH_B); + idx++; + } + h = (h * INLINE_HASH_A) ^ (bci * INLINE_HASH_B); + } + // Method code and related information. address code() { if (_code == NULL) load_code(); return _code; } int code_size() const { check_is_loaded(); return _code_size; } diff --git a/src/hotspot/share/ci/ciMethodData.cpp b/src/hotspot/share/ci/ciMethodData.cpp index a6b38dbde0c..49655e5289a 100644 --- a/src/hotspot/share/ci/ciMethodData.cpp +++ b/src/hotspot/share/ci/ciMethodData.cpp @@ -262,6 +262,7 @@ bool ciMethodData::load_data() { return true; } + void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) { for (uint row = 0; row < row_limit(); row++) { Klass* k = data->as_ReceiverTypeData()->receiver(row); @@ -337,6 +338,39 @@ ciProfileData* ciMethodData::data_at(int data_index) { }; } +ciProfileData* ciMethodData::data_at_layout(DataLayout* data_layout) { + switch (data_layout->tag()) { + case DataLayout::no_tag: + default: + ShouldNotReachHere(); + return NULL; + case DataLayout::bit_data_tag: + return new ciBitData(data_layout); + case DataLayout::counter_data_tag: + return new ciCounterData(data_layout); + case DataLayout::jump_data_tag: + return new ciJumpData(data_layout); + case DataLayout::receiver_type_data_tag: + return new ciReceiverTypeData(data_layout); + case DataLayout::virtual_call_data_tag: + return new ciVirtualCallData(data_layout); + case DataLayout::ret_data_tag: + return new ciRetData(data_layout); + case DataLayout::branch_data_tag: + return new ciBranchData(data_layout); + case DataLayout::multi_branch_data_tag: + return new ciMultiBranchData(data_layout); + case 
DataLayout::arg_info_data_tag: + return new ciArgInfoData(data_layout); + case DataLayout::call_type_data_tag: + return new ciCallTypeData(data_layout); + case DataLayout::virtual_call_type_data_tag: + return new ciVirtualCallTypeData(data_layout); + case DataLayout::parameters_type_data_tag: + return new ciParametersTypeData(data_layout); + }; +} + // Iteration over data. ciProfileData* ciMethodData::next_data(ciProfileData* current) { int current_index = dp_to_di(current->dp()); @@ -345,6 +379,11 @@ ciProfileData* ciMethodData::next_data(ciProfileData* current) { return next; } +DataLayout* ciMethodData::get_expanded_data(int hash) { + assert(hash != 0, "Illegal expanded data"); + return get_MethodData()->find_expanded_data_with_hash(hash); +} + ciProfileData* ciMethodData::bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots) { DataLayout* dp = extra_data_base(); DataLayout* end = args_data_limit(); @@ -592,7 +631,15 @@ ByteSize ciMethodData::offset_of_slot(ciProfileData* data, ByteSize slot_offset_ // Add in counter_offset, the # of bytes into the ProfileData of counter or flag int offset = in_bytes(data_offset) + cell_offset + in_bytes(slot_offset_in_data); + return in_ByteSize(offset); +} +ByteSize ciMethodData::offset_of_raw_slot(Metadata* mdo, DataLayout* data, ByteSize slot_offset_in_data) { + // Get cell offset of the ProfileData within data array + int cell_offset = (address) data - (address) mdo; + + // Add in counter_offset, the # of bytes into the ProfileData of counter or flag + int offset = cell_offset + in_bytes(slot_offset_in_data); return in_ByteSize(offset); } diff --git a/src/hotspot/share/ci/ciMethodData.hpp b/src/hotspot/share/ci/ciMethodData.hpp index 6a95456493e..9ceb5c1297c 100644 --- a/src/hotspot/share/ci/ciMethodData.hpp +++ b/src/hotspot/share/ci/ciMethodData.hpp @@ -469,10 +469,16 @@ class ciMethodData : public ciMetadata { ciArgInfoData *arg_info() const; +public: address data_base() const { return (address) _data; } + 
DataLayout* expand_profile_data(int bci, int inline_hash) { + return get_MethodData()->expand_data_for_branch(bci, inline_hash); + } + +private: void prepare_metadata(); void load_remaining_extra_data(); ciProfileData* bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots); @@ -528,11 +534,15 @@ class ciMethodData : public ciMetadata { // Get the data at an arbitrary (sort of) data index. ciProfileData* data_at(int data_index); + ciProfileData* data_at_layout(DataLayout* data); + // Walk through the data in order. ciProfileData* first_data() { return data_at(first_di()); } ciProfileData* next_data(ciProfileData* current); bool is_valid(ciProfileData* current) { return current != NULL; } + DataLayout* get_expanded_data(int hash); + DataLayout* extra_data_base() const { return data_layout_at(data_size()); } DataLayout* args_data_limit() const { return data_layout_at(data_size() + extra_data_size() - parameters_size()); } @@ -591,7 +601,9 @@ class ciMethodData : public ciMetadata { // Code generation helper ByteSize offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data); + ByteSize offset_of_raw_slot(Metadata* mdo, DataLayout* data, ByteSize slot_offset_in_data); int byte_offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data) { return in_bytes(offset_of_slot(data, slot_offset_in_data)); } + int byte_offset_of_raw_slot(Metadata* mdo, DataLayout* data, ByteSize slot_offset_in_data) { return in_bytes(offset_of_raw_slot(mdo, data, slot_offset_in_data)); } #ifndef PRODUCT // printing support for method data diff --git a/src/hotspot/share/oops/methodData.cpp b/src/hotspot/share/oops/methodData.cpp index a25eccf228a..8280d0c7874 100644 --- a/src/hotspot/share/oops/methodData.cpp +++ b/src/hotspot/share/oops/methodData.cpp @@ -926,6 +926,12 @@ int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) { if (args_cell > 0) { object_size += DataLayout::compute_size_in_bytes(args_cell); } + + // for expanded + if 
(EnableLevel3FineProfiling) { + object_size += MethodData::expand_data_size_in_bytes(); + } + return object_size; } @@ -1052,7 +1058,7 @@ int MethodData::initialize_data(BytecodeStream* stream, tag == DataLayout::counter_data_tag || tag == DataLayout::virtual_call_type_data_tag || tag == DataLayout::virtual_call_data_tag)) || - cell_count == bytecode_cell_count(c), "cell counts must agree"); + cell_count == bytecode_cell_count(c), "cell counts must agree %d, cell count: %d, verify count: %d", static_cast<int>(c), cell_count, bytecode_cell_count(c)); if (cell_count >= 0) { assert(tag != DataLayout::no_tag, "bad tag"); assert(bytecode_has_profile(c), "agree w/ BHP"); @@ -1140,6 +1146,50 @@ MethodData::MethodData(const methodHandle& method) initialize(); } +DataLayout* MethodData::expand_data_for_branch(int bci, int hash) { + // find available space + size_t insert_index = 0; + for (; insert_index < FineProfilingExtraSize; insert_index++) { + if (_index_to_expand_profile_data[insert_index] == hash) { + int cell_count = BranchData::static_cell_count(); + int total_size = DataLayout::compute_size_in_bytes(cell_count) * insert_index; + log_info(compilation)("expand data exist for hash: %x, data start: %p, expand start: %x, total size: %x", hash, data_base(), non_expand_data_size(), total_size); + return data_layout_at(non_expand_data_size() + total_size); + } else if (_index_to_expand_profile_data[insert_index] == 0) { + _index_to_expand_profile_data[insert_index] = hash; + break; + } + } + if (insert_index == FineProfilingExtraSize) { + log_info(compilation)("expanded data for %p is full", this); + return NULL; + } + + // init expanded profile data + int cell_count = BranchData::static_cell_count(); + int total_size = DataLayout::compute_size_in_bytes(cell_count) * insert_index; + int tag = DataLayout::branch_data_tag; + log_info(compilation)("expand data init for hash: %x, data start: %p, expand start: %x, total size: %x", hash, data_base(), non_expand_data_size(), 
total_size); + DataLayout* data_layout = data_layout_at(non_expand_data_size() + total_size); + data_layout->initialize(tag, bci, cell_count); + + return data_layout; +} + +DataLayout* MethodData::find_expanded_data_with_hash(int hash) { + size_t insert_index = 0; + for (; insert_index < FineProfilingExtraSize; insert_index++) { + if (_index_to_expand_profile_data[insert_index] == hash) { + int total_size = DataLayout::compute_size_in_bytes(BranchData::static_cell_count()) * insert_index; + DataLayout* data = data_layout_at(non_expand_data_size() + total_size); + log_info(compilation)("find data %p for hash: %x, data start %p, expand start: %x, total size: %x", data, hash, data_base(), non_expand_data_size(), total_size); + return data; + } + } + log_info(compilation)("not find data for hash: %x", hash); + return NULL; +} + void MethodData::initialize() { NoSafepointVerifier no_safepoint; // init function atomic wrt GC ResourceMark rm; @@ -1150,6 +1200,15 @@ void MethodData::initialize() { // corresponding data cells. int data_size = 0; int empty_bc_count = 0; // number of bytecodes lacking data + + if (EnableLevel3FineProfiling) { + _index_to_expand_profile_data = (int*) AllocateHeap(FineProfilingExtraSize * sizeof(int), mtClass); + + for (size_t i = 0; i < FineProfilingExtraSize; i++) { + _index_to_expand_profile_data[i] = 0; + } + } + _data[0] = 0; // apparently not set below. 
BytecodeStream stream(method()); Bytecodes::Code c; @@ -1206,6 +1265,10 @@ void MethodData::initialize() { post_initialize(&stream); + _non_expand_data_size = data_size + extra_size + arg_data_size; + if (EnableLevel3FineProfiling) { + object_size += MethodData::expand_data_size_in_bytes(); + } assert(object_size == compute_allocation_size_in_bytes(methodHandle(_method)), "MethodData: computed size != initialized size"); set_size(object_size); } diff --git a/src/hotspot/share/oops/methodData.hpp b/src/hotspot/share/oops/methodData.hpp index e4a413b225d..baaac378423 100644 --- a/src/hotspot/share/oops/methodData.hpp +++ b/src/hotspot/share/oops/methodData.hpp @@ -1982,6 +1982,9 @@ class MethodData : public Metadata { bool is_methodData() const volatile { return true; } void initialize(); + DataLayout* expand_data_for_branch(int bci, int hash); + DataLayout* find_expanded_data_with_hash(int hash); + // Whole-method sticky bits and flags enum { _trap_hist_limit = Deoptimization::Reason_TRAP_HISTORY_LENGTH, @@ -2106,11 +2109,15 @@ class MethodData : public Metadata { // Size of _data array in bytes. (Excludes header and extra_data fields.) int _data_size; + int _non_expand_data_size; + // data index for the area dedicated to parameters. -1 if no // parameter profiling. 
enum { no_parameters = -2, parameters_uninitialized = -1 }; int _parameters_type_data_di; + int* _index_to_expand_profile_data; + // Beginning of the data entries intptr_t _data[1]; @@ -2186,6 +2193,10 @@ class MethodData : public Metadata { static bool profile_parameters_jsr292_only(); static bool profile_all_parameters(); + static int expand_data_size_in_bytes() { + return FineProfilingExtraSize * DataLayout::compute_size_in_bytes(BranchData::static_cell_count()); + } + void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false); void verify_extra_data_clean(CleanExtraDataClosure* cl); @@ -2375,9 +2386,21 @@ class MethodData : public Metadata { // Add a handful of extra data records, for trap tracking. DataLayout* extra_data_base() const { return limit_data_position(); } - DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); } - DataLayout* args_data_limit() const { return (DataLayout*)((address)this + size_in_bytes() - - parameters_size_in_bytes()); } + DataLayout* extra_data_limit() const { + int expand_size = 0; + if (EnableLevel3FineProfiling) { + expand_size = MethodData::expand_data_size_in_bytes(); + } + return (DataLayout*)((address)this + size_in_bytes() - expand_size); + } + DataLayout* args_data_limit() const { + int expand_size = 0; + if (EnableLevel3FineProfiling) { + expand_size = MethodData::expand_data_size_in_bytes(); + } + return (DataLayout*)((address)this + size_in_bytes() - parameters_size_in_bytes() - expand_size); + } + int non_expand_data_size() const { return _non_expand_data_size; } int extra_data_size() const { return (address)extra_data_limit() - (address)extra_data_base(); } static DataLayout* next_extra(DataLayout* dp); diff --git a/src/hotspot/share/opto/parse.hpp b/src/hotspot/share/opto/parse.hpp index aa13e886337..a6bc1c7ae31 100644 --- a/src/hotspot/share/opto/parse.hpp +++ b/src/hotspot/share/opto/parse.hpp @@ -544,6 +544,8 @@ class Parse : public GraphKit { void 
do_jsr(); void do_ret(); + int hash_inline_context_for_branch_prediction(ciMethodData* methodData); + float dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test); float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test); bool seems_never_taken(float prob) const; diff --git a/src/hotspot/share/opto/parse2.cpp b/src/hotspot/share/opto/parse2.cpp index c576622d04f..2de8a809884 100644 --- a/src/hotspot/share/opto/parse2.cpp +++ b/src/hotspot/share/opto/parse2.cpp @@ -1275,6 +1275,23 @@ static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, i } return false; } + +int Parse::hash_inline_context_for_branch_prediction(ciMethodData* methodData) { + JVMState* caller = this->caller(); + if (caller == NULL) { + return 0; + } + + int h = INLINE_HASH_FIRST; + this->method()->compute_hash_for_method_and_bci(bci(), h); + while (caller != NULL) { + if (caller->has_method()) { + caller->method()->compute_hash_for_method_and_bci(caller->bci(), h); + } + caller = caller->caller(); + } + return h; +} //--------------------------dynamic_branch_prediction-------------------------- // Try to gather dynamic branch prediction behavior. Return a probability // of the branch being taken and set the "cnt" field. 
Returns a -1.0 @@ -1292,9 +1309,24 @@ float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* t if (use_mdo) { // Use MethodData information if it is available // FIXME: free the ProfileData structure + ciMethodData* methodData = method()->method_data(); + if (!methodData->is_mature()) return PROB_UNKNOWN; - ciProfileData* data = methodData->bci_to_data(bci()); + + int hash = 0; + if (EnableLevel3FineProfiling) { + hash = hash_inline_context_for_branch_prediction(methodData); + } + ciProfileData* data = NULL; + if (hash != 0) { + DataLayout* data_layout = methodData->get_expanded_data(hash); + if (data_layout == NULL) return PROB_UNKNOWN; // C1 never recorded an expanded slot (e.g. expansion table full) + data = methodData->data_at_layout(data_layout); + } else { + data = methodData->bci_to_data(bci()); + } if (data == NULL) { return PROB_UNKNOWN; } + log_info(compilation)("c2 get branch prediction, method_data: %p, meta: %p, dp: %p", methodData, methodData->constant_encoding(), data->dp()); diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index 41d911ffa09..ce5799e3b24 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -2698,6 +2698,12 @@ define_pd_global(uint64_t,MaxRAM, 1ULL*G); \ product(bool, UseCompactObjectHeaders, false, \ "Use compact 64-bit object headers in 64-bit VM.") \ + \ + product(bool, EnableLevel3FineProfiling, false, \ + "Insert finer profile data during level 3 compiling") \ + \ + product(uintx, FineProfilingExtraSize, 16, \ + "Extra branch data size for finer profile ")