diff --git a/src/compiler/evm_frontend/evm_analyzer.h b/src/compiler/evm_frontend/evm_analyzer.h
index c5af915c..85e3d11d 100644
--- a/src/compiler/evm_frontend/evm_analyzer.h
+++ b/src/compiler/evm_frontend/evm_analyzer.h
@@ -114,7 +114,10 @@ struct JITSuitabilityResult {
 /// Thresholds for JIT suitability fallback. Normal contracts have <20
 /// RA-expensive ops per block; these values are conservatively high.
-static constexpr size_t MAX_JIT_BYTECODE_SIZE = 0x6000;
+/// Bytecode size limit: 64 KB is well above the EIP-170 deployed-code limit
+/// (24 576) and the EIP-3860 initcode limit (49 152), so real-world contracts
+/// are never affected, while pathological synthetic tests (400 KB+) are caught.
+static constexpr size_t MAX_JIT_BYTECODE_SIZE = 0x10000;
 static constexpr size_t MAX_JIT_MIR_ESTIMATE = 50000;
 static constexpr size_t MAX_CONSECUTIVE_RA_EXPENSIVE = 128;
 static constexpr size_t MAX_BLOCK_RA_EXPENSIVE = 256;
@@ -306,10 +309,14 @@ class EVMAnalyzer {
       BlockInfos.emplace(CurInfo.EntryPC, CurInfo);
     }
 
-    // Compute final fallback verdict
+    // Compute final fallback verdict.
+    // Pattern-based thresholds target pathological LLVM register-allocator
+    // cases (dense RA-expensive opcodes, DUP feedback loops).
+    // The bytecode size guard (64 KB) catches extreme synthetic tests whose
+    // sheer IR volume stalls LLVM, without affecting real contracts (bounded
+    // by EIP-170 / EIP-3860).
     JITResult.ShouldFallback =
-        BytecodeSize > MAX_JIT_BYTECODE_SIZE ||
-        JITResult.MirEstimate > MAX_JIT_MIR_ESTIMATE ||
+        JITResult.BytecodeSize > MAX_JIT_BYTECODE_SIZE ||
         JITResult.MaxConsecutiveExpensive > MAX_CONSECUTIVE_RA_EXPENSIVE ||
         JITResult.MaxBlockExpensiveCount > MAX_BLOCK_RA_EXPENSIVE ||
         JITResult.DupFeedbackPatternCount > MAX_DUP_FEEDBACK_PATTERN;
diff --git a/src/vm/dt_evmc_vm.cpp b/src/vm/dt_evmc_vm.cpp
index 2ee646ba..abeae304 100644
--- a/src/vm/dt_evmc_vm.cpp
+++ b/src/vm/dt_evmc_vm.cpp
@@ -79,6 +79,9 @@ struct DTVM : evmc_vm {
   std::unique_ptr RT;
   std::unique_ptr ExecHost;
   std::unordered_map LoadedMods;
+#ifdef ZEN_ENABLE_JIT_PRECOMPILE_FALLBACK
+  std::unordered_map<uint64_t, bool> FallbackCache;
+#endif
   Isolation *Iso = nullptr;
 };
@@ -148,28 +151,29 @@ evmc_result execute(evmc_vm *EVMInstance, const evmc_host_interface *Host,
       return evmc_make_result(EVMC_FAILURE, 0, 0, nullptr, 0);
     }
   }
+  uint32_t CheckSum = crc32(Code, CodeSize);
+  uint64_t ModKey = (static_cast<uint64_t>(Rev) << 32) | CheckSum;
+
 #ifdef ZEN_ENABLE_JIT_PRECOMPILE_FALLBACK
-  // Use interpreter mode for bytecode that would be too expensive to JIT.
-  // The EVMAnalyzer performs a pattern-aware O(n) scan that detects:
-  //   - raw bytecode size / estimated MIR instruction count too large
-  //   - high density of RA-expensive opcodes (SHL/SHR/SAR/MUL/SIGNEXTEND)
-  //   - long consecutive runs of RA-expensive ops
-  //   - DUP-induced feedback loops (b0 pattern)
   std::unique_ptr TempConfig;
   if (VM->Config.Mode == RunMode::MultipassMode) {
-    COMPILER::EVMAnalyzer Analyzer(Rev);
-    Analyzer.analyze(Code, CodeSize);
-    const auto &JITResult = Analyzer.getJITSuitability();
-    if (JITResult.ShouldFallback) {
+    auto CacheIt = VM->FallbackCache.find(ModKey);
+    bool NeedFallback;
+    if (CacheIt != VM->FallbackCache.end()) {
+      NeedFallback = CacheIt->second;
+    } else {
+      COMPILER::EVMAnalyzer Analyzer(Rev);
+      Analyzer.analyze(Code, CodeSize);
+      NeedFallback = Analyzer.getJITSuitability().ShouldFallback;
+      VM->FallbackCache[ModKey] = NeedFallback;
+    }
+    if (NeedFallback) {
       RuntimeConfig NewConfig = VM->Config;
       NewConfig.Mode = RunMode::InterpMode;
       TempConfig = std::make_unique(VM->RT.get(), NewConfig);
     }
   }
 #endif // ZEN_ENABLE_JIT_PRECOMPILE_FALLBACK
-
-  uint32_t CheckSum = crc32(Code, CodeSize);
-  uint64_t ModKey = (static_cast<uint64_t>(Rev) << 32) | CheckSum;
   std::string ModName = std::to_string(CheckSum) + "_" + std::to_string(static_cast(Rev));
   auto ModRet = VM->RT->loadEVMModule(ModName, Code, CodeSize, Rev);