eval.cpp 90 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185
  1. // Part of the Carbon Language project, under the Apache License v2.0 with LLVM
  2. // Exceptions. See /LICENSE for license information.
  3. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. #include "toolchain/check/eval.h"
  5. #include "toolchain/base/kind_switch.h"
  6. #include "toolchain/check/diagnostic_helpers.h"
  7. #include "toolchain/check/generic.h"
  8. #include "toolchain/check/import_ref.h"
  9. #include "toolchain/diagnostics/diagnostic_emitter.h"
  10. #include "toolchain/diagnostics/format_providers.h"
  11. #include "toolchain/sem_ir/builtin_function_kind.h"
  12. #include "toolchain/sem_ir/function.h"
  13. #include "toolchain/sem_ir/generic.h"
  14. #include "toolchain/sem_ir/ids.h"
  15. #include "toolchain/sem_ir/inst_kind.h"
  16. #include "toolchain/sem_ir/typed_insts.h"
  17. namespace Carbon::Check {
  18. namespace {
// Information about an eval block of a specific that we are currently building.
struct SpecificEvalInfo {
  // The region within the specific whose eval block we are building.
  SemIR::GenericInstIndex::Region region;
  // The work-in-progress contents of the eval block. This is a non-owning view
  // of the block being built; entries may be invalid if they have not been
  // evaluated yet.
  llvm::ArrayRef<SemIR::InstId> values;
};
// Information about the context within which we are performing evaluation.
class EvalContext {
 public:
  // `specific_id` is the specific within which evaluation takes place; `None`
  // means evaluation is not specialized. `specific_eval_info`, if provided,
  // describes the eval block of `specific_id` that is currently being built.
  explicit EvalContext(
      Context& context, SemIRLoc fallback_loc,
      SemIR::SpecificId specific_id = SemIR::SpecificId::None,
      std::optional<SpecificEvalInfo> specific_eval_info = std::nullopt)
      : context_(context),
        fallback_loc_(fallback_loc),
        specific_id_(specific_id),
        specific_eval_info_(specific_eval_info) {}

  // Gets the location to use for diagnostics if a better location is
  // unavailable.
  // TODO: This is also sometimes unavailable.
  auto fallback_loc() const -> SemIRLoc { return fallback_loc_; }

  // Returns a location to use to point at an instruction in a diagnostic, given
  // a list of instructions that might have an attached location. This is the
  // location of the first instruction in the list that has a location if there
  // is one, and otherwise the fallback location.
  auto GetDiagnosticLoc(llvm::ArrayRef<SemIR::InstId> inst_ids) -> SemIRLoc {
    for (auto inst_id : inst_ids) {
      if (inst_id.has_value() &&
          context_.insts().GetLocId(inst_id).has_value()) {
        return inst_id;
      }
    }
    return fallback_loc_;
  }

  // Gets the value of the specified compile-time binding in this context.
  // Returns `None` if the value is not fixed in this context.
  auto GetCompileTimeBindValue(SemIR::CompileTimeBindIndex bind_index)
      -> SemIR::ConstantId {
    if (!bind_index.has_value() || !specific_id_.has_value()) {
      return SemIR::ConstantId::None;
    }
    const auto& specific = specifics().Get(specific_id_);
    auto args = inst_blocks().Get(specific.args_id);
    // Bindings past the ones with known arguments can appear as local
    // bindings of entities declared within this generic.
    if (static_cast<size_t>(bind_index.index) >= args.size()) {
      return SemIR::ConstantId::None;
    }
    return constant_values().Get(args[bind_index.index]);
  }

  // Given a constant value from the SemIR we're evaluating, finds the
  // corresponding constant value to use in the context of this evaluation.
  // This can be different if the original SemIR is for a generic and we are
  // evaluating with specific arguments for the generic parameters.
  auto GetInContext(SemIR::ConstantId const_id) -> SemIR::ConstantId {
    // Concrete constants are the same in every context.
    if (!const_id.is_symbolic()) {
      return const_id;
    }
    // While resolving a specific, map from previous instructions in the eval
    // block into their evaluated values. These values won't be present on the
    // specific itself yet, so `GetConstantInSpecific` won't be able to find
    // them.
    if (specific_eval_info_) {
      const auto& symbolic_info =
          constant_values().GetSymbolicConstant(const_id);
      if (symbolic_info.index.has_value() &&
          symbolic_info.generic_id ==
              specifics().Get(specific_id_).generic_id &&
          symbolic_info.index.region() == specific_eval_info_->region) {
        auto inst_id = specific_eval_info_->values[symbolic_info.index.index()];
        CARBON_CHECK(inst_id.has_value(),
                     "Forward reference in eval block: index {0} referenced "
                     "before evaluation",
                     symbolic_info.index.index());
        return constant_values().Get(inst_id);
      }
    }
    // Map from a specific constant value to the canonical value.
    return GetConstantInSpecific(sem_ir(), specific_id_, const_id);
  }

  // Gets the constant value of the specified instruction in this context.
  auto GetConstantValue(SemIR::InstId inst_id) -> SemIR::ConstantId {
    return GetInContext(constant_values().Get(inst_id));
  }

  // Gets the constant value of the specified type in this context.
  auto GetConstantValue(SemIR::TypeId type_id) -> SemIR::ConstantId {
    return GetInContext(types().GetConstantId(type_id));
  }

  // Gets the constant value of the specified type in this context, as a type.
  auto GetConstantValueAsType(SemIR::TypeId id) -> SemIR::TypeId {
    return context().GetTypeIdForTypeConstant(GetConstantValue(id));
  }

  // Gets the instruction describing the constant value of the specified type in
  // this context.
  auto GetConstantValueAsInst(SemIR::TypeId id) -> SemIR::Inst {
    return insts().Get(
        context().constant_values().GetInstId(GetConstantValue(id)));
  }

  // Convenience accessors forwarding to the underlying `SemIR::File` stores.
  auto ints() -> SharedValueStores::IntStore& { return sem_ir().ints(); }
  auto floats() -> SharedValueStores::FloatStore& { return sem_ir().floats(); }
  auto entity_names() -> SemIR::EntityNameStore& {
    return sem_ir().entity_names();
  }
  auto functions() -> const ValueStore<SemIR::FunctionId>& {
    return sem_ir().functions();
  }
  auto classes() -> const ValueStore<SemIR::ClassId>& {
    return sem_ir().classes();
  }
  auto interfaces() -> const ValueStore<SemIR::InterfaceId>& {
    return sem_ir().interfaces();
  }
  auto facet_types() -> CanonicalValueStore<SemIR::FacetTypeId>& {
    return sem_ir().facet_types();
  }
  auto specifics() -> const SemIR::SpecificStore& {
    return sem_ir().specifics();
  }
  auto type_blocks() -> SemIR::BlockValueStore<SemIR::TypeBlockId>& {
    return sem_ir().type_blocks();
  }
  auto insts() -> const SemIR::InstStore& { return sem_ir().insts(); }
  auto inst_blocks() -> SemIR::InstBlockStore& {
    return sem_ir().inst_blocks();
  }

  // Gets the constant value store. Note that this does not provide the constant
  // values that should be used from this evaluation context, and so should be
  // used with caution; prefer `GetConstantValue` / `GetInContext`.
  auto constant_values() -> const SemIR::ConstantValueStore& {
    return sem_ir().constant_values();
  }

  // Gets the types store. Note that this does not provide the type values that
  // should be used from this evaluation context, and so should be used with
  // caution.
  auto types() -> const SemIR::TypeStore& { return sem_ir().types(); }

  auto context() -> Context& { return context_; }
  auto sem_ir() -> SemIR::File& { return context().sem_ir(); }
  auto emitter() -> Context::DiagnosticEmitter& { return context().emitter(); }

 private:
  // The type-checking context in which we're performing evaluation.
  Context& context_;
  // The location to use for diagnostics when a better location isn't available.
  SemIRLoc fallback_loc_;
  // The specific that we are evaluating within.
  SemIR::SpecificId specific_id_;
  // If we are currently evaluating an eval block for `specific_id_`,
  // information about that evaluation.
  std::optional<SpecificEvalInfo> specific_eval_info_;
};
  169. } // namespace
namespace {
// The evaluation phase for an expression, computed by evaluation. These are
// ordered so that the phase of an expression is the numerically highest phase
// of its constituent evaluations (see `LatestPhase`, which takes the maximum).
// Note that an expression with any runtime component is known to have Runtime
// phase even if it involves an evaluation with UnknownDueToError phase.
enum class Phase : uint8_t {
  // Value could be entirely and concretely computed.
  Template,
  // Evaluation phase is symbolic because the expression involves specifically a
  // reference to `.Self`.
  PeriodSelfSymbolic,
  // Evaluation phase is symbolic because the expression involves a reference to
  // a symbolic binding.
  Symbolic,
  // The evaluation phase is unknown because evaluation encountered an
  // already-diagnosed semantic or syntax error. This is treated as being
  // potentially constant, but with an unknown phase.
  UnknownDueToError,
  // The expression has runtime phase because of a non-constant subexpression.
  Runtime,
};
}  // namespace
  193. // Gets the phase in which the value of a constant will become available.
  194. static auto GetPhase(EvalContext& eval_context, SemIR::ConstantId constant_id)
  195. -> Phase {
  196. if (!constant_id.is_constant()) {
  197. return Phase::Runtime;
  198. } else if (constant_id == SemIR::ErrorInst::SingletonConstantId) {
  199. return Phase::UnknownDueToError;
  200. } else if (constant_id.is_template()) {
  201. return Phase::Template;
  202. } else if (eval_context.constant_values().DependsOnGenericParameter(
  203. constant_id)) {
  204. return Phase::Symbolic;
  205. } else {
  206. CARBON_CHECK(constant_id.is_symbolic());
  207. return Phase::PeriodSelfSymbolic;
  208. }
  209. }
  210. // Returns the later of two phases.
  211. static auto LatestPhase(Phase a, Phase b) -> Phase {
  212. return static_cast<Phase>(
  213. std::max(static_cast<uint8_t>(a), static_cast<uint8_t>(b)));
  214. }
  215. // `where` expressions using `.Self` should not be considered symbolic
  216. // - `Interface where .Self impls I and .A = bool` -> template
  217. // - `T:! type` ... `Interface where .A = T` -> symbolic, since uses `T` which
  218. // is symbolic and not due to `.Self`.
  219. static auto UpdatePhaseIgnorePeriodSelf(EvalContext& eval_context,
  220. SemIR::ConstantId constant_id,
  221. Phase* phase) {
  222. Phase constant_phase = GetPhase(eval_context, constant_id);
  223. // Since LatestPhase(x, Phase::Template) == x, this is equivalent to replacing
  224. // Phase::PeriodSelfSymbolic with Phase::Template.
  225. if (constant_phase != Phase::PeriodSelfSymbolic) {
  226. *phase = LatestPhase(*phase, constant_phase);
  227. }
  228. }
  229. // Forms a `constant_id` describing a given evaluation result.
  230. static auto MakeConstantResult(Context& context, SemIR::Inst inst, Phase phase)
  231. -> SemIR::ConstantId {
  232. switch (phase) {
  233. case Phase::Template:
  234. return context.constants().GetOrAdd(inst,
  235. SemIR::ConstantStore::IsTemplate);
  236. case Phase::PeriodSelfSymbolic:
  237. return context.constants().GetOrAdd(
  238. inst, SemIR::ConstantStore::IsPeriodSelfSymbolic);
  239. case Phase::Symbolic:
  240. return context.constants().GetOrAdd(inst,
  241. SemIR::ConstantStore::IsSymbolic);
  242. case Phase::UnknownDueToError:
  243. return SemIR::ErrorInst::SingletonConstantId;
  244. case Phase::Runtime:
  245. return SemIR::ConstantId::NotConstant;
  246. }
  247. }
  248. // Forms a `constant_id` describing why an evaluation was not constant.
  249. static auto MakeNonConstantResult(Phase phase) -> SemIR::ConstantId {
  250. return phase == Phase::UnknownDueToError
  251. ? SemIR::ErrorInst::SingletonConstantId
  252. : SemIR::ConstantId::NotConstant;
  253. }
  254. // Converts a bool value into a ConstantId.
  255. static auto MakeBoolResult(Context& context, SemIR::TypeId bool_type_id,
  256. bool result) -> SemIR::ConstantId {
  257. return MakeConstantResult(
  258. context,
  259. SemIR::BoolLiteral{.type_id = bool_type_id,
  260. .value = SemIR::BoolValue::From(result)},
  261. Phase::Template);
  262. }
  263. // Converts an APInt value into a ConstantId.
  264. static auto MakeIntResult(Context& context, SemIR::TypeId type_id,
  265. bool is_signed, llvm::APInt value)
  266. -> SemIR::ConstantId {
  267. CARBON_CHECK(is_signed == context.types().IsSignedInt(type_id));
  268. auto result = is_signed ? context.ints().AddSigned(std::move(value))
  269. : context.ints().AddUnsigned(std::move(value));
  270. return MakeConstantResult(
  271. context, SemIR::IntValue{.type_id = type_id, .int_id = result},
  272. Phase::Template);
  273. }
  274. // Converts an APFloat value into a ConstantId.
  275. static auto MakeFloatResult(Context& context, SemIR::TypeId type_id,
  276. llvm::APFloat value) -> SemIR::ConstantId {
  277. auto result = context.floats().Add(std::move(value));
  278. return MakeConstantResult(
  279. context, SemIR::FloatLiteral{.type_id = type_id, .float_id = result},
  280. Phase::Template);
  281. }
  282. // `GetConstantValue` checks to see whether the provided ID describes a value
  283. // with constant phase, and if so, returns the corresponding constant value.
  284. // Overloads are provided for different kinds of ID.
  285. // If the given instruction is constant, returns its constant value.
  286. static auto GetConstantValue(EvalContext& eval_context, SemIR::InstId inst_id,
  287. Phase* phase) -> SemIR::InstId {
  288. auto const_id = eval_context.GetConstantValue(inst_id);
  289. *phase = LatestPhase(*phase, GetPhase(eval_context, const_id));
  290. return eval_context.constant_values().GetInstId(const_id);
  291. }
  292. // Given a type which may refer to a generic parameter, returns the
  293. // corresponding type in the evaluation context.
  294. static auto GetConstantValue(EvalContext& eval_context, SemIR::TypeId type_id,
  295. Phase* phase) -> SemIR::TypeId {
  296. auto const_id = eval_context.GetConstantValue(type_id);
  297. *phase = LatestPhase(*phase, GetPhase(eval_context, const_id));
  298. return eval_context.context().GetTypeIdForTypeConstant(const_id);
  299. }
  300. // If the given instruction block contains only constants, returns a
  301. // corresponding block of those values.
  302. static auto GetConstantValue(EvalContext& eval_context,
  303. SemIR::InstBlockId inst_block_id, Phase* phase)
  304. -> SemIR::InstBlockId {
  305. if (!inst_block_id.has_value()) {
  306. return SemIR::InstBlockId::None;
  307. }
  308. auto insts = eval_context.inst_blocks().Get(inst_block_id);
  309. llvm::SmallVector<SemIR::InstId> const_insts;
  310. for (auto inst_id : insts) {
  311. auto const_inst_id = GetConstantValue(eval_context, inst_id, phase);
  312. if (!const_inst_id.has_value()) {
  313. return SemIR::InstBlockId::None;
  314. }
  315. // Once we leave the small buffer, we know the first few elements are all
  316. // constant, so it's likely that the entire block is constant. Resize to the
  317. // target size given that we're going to allocate memory now anyway.
  318. if (const_insts.size() == const_insts.capacity()) {
  319. const_insts.reserve(insts.size());
  320. }
  321. const_insts.push_back(const_inst_id);
  322. }
  323. // TODO: If the new block is identical to the original block, and we know the
  324. // old ID was canonical, return the original ID.
  325. return eval_context.inst_blocks().AddCanonical(const_insts);
  326. }
  327. // Compute the constant value of a type block. This may be different from the
  328. // input type block if we have known generic arguments.
  329. static auto GetConstantValue(EvalContext& eval_context,
  330. SemIR::StructTypeFieldsId fields_id, Phase* phase)
  331. -> SemIR::StructTypeFieldsId {
  332. if (!fields_id.has_value()) {
  333. return SemIR::StructTypeFieldsId::None;
  334. }
  335. auto fields = eval_context.context().struct_type_fields().Get(fields_id);
  336. llvm::SmallVector<SemIR::StructTypeField> new_fields;
  337. for (auto field : fields) {
  338. auto new_type_id = GetConstantValue(eval_context, field.type_id, phase);
  339. if (!new_type_id.has_value()) {
  340. return SemIR::StructTypeFieldsId::None;
  341. }
  342. // Once we leave the small buffer, we know the first few elements are all
  343. // constant, so it's likely that the entire block is constant. Resize to the
  344. // target size given that we're going to allocate memory now anyway.
  345. if (new_fields.size() == new_fields.capacity()) {
  346. new_fields.reserve(fields.size());
  347. }
  348. new_fields.push_back({.name_id = field.name_id, .type_id = new_type_id});
  349. }
  350. // TODO: If the new block is identical to the original block, and we know the
  351. // old ID was canonical, return the original ID.
  352. return eval_context.context().struct_type_fields().AddCanonical(new_fields);
  353. }
  354. // Compute the constant value of a type block. This may be different from the
  355. // input type block if we have known generic arguments.
  356. static auto GetConstantValue(EvalContext& eval_context,
  357. SemIR::TypeBlockId type_block_id, Phase* phase)
  358. -> SemIR::TypeBlockId {
  359. if (!type_block_id.has_value()) {
  360. return SemIR::TypeBlockId::None;
  361. }
  362. auto types = eval_context.type_blocks().Get(type_block_id);
  363. llvm::SmallVector<SemIR::TypeId> new_types;
  364. for (auto type_id : types) {
  365. auto new_type_id = GetConstantValue(eval_context, type_id, phase);
  366. if (!new_type_id.has_value()) {
  367. return SemIR::TypeBlockId::None;
  368. }
  369. // Once we leave the small buffer, we know the first few elements are all
  370. // constant, so it's likely that the entire block is constant. Resize to the
  371. // target size given that we're going to allocate memory now anyway.
  372. if (new_types.size() == new_types.capacity()) {
  373. new_types.reserve(types.size());
  374. }
  375. new_types.push_back(new_type_id);
  376. }
  377. // TODO: If the new block is identical to the original block, and we know the
  378. // old ID was canonical, return the original ID.
  379. return eval_context.type_blocks().AddCanonical(new_types);
  380. }
  381. // The constant value of a specific is the specific with the corresponding
  382. // constant values for its arguments.
  383. static auto GetConstantValue(EvalContext& eval_context,
  384. SemIR::SpecificId specific_id, Phase* phase)
  385. -> SemIR::SpecificId {
  386. if (!specific_id.has_value()) {
  387. return SemIR::SpecificId::None;
  388. }
  389. const auto& specific = eval_context.specifics().Get(specific_id);
  390. auto args_id = GetConstantValue(eval_context, specific.args_id, phase);
  391. if (!args_id.has_value()) {
  392. return SemIR::SpecificId::None;
  393. }
  394. if (args_id == specific.args_id) {
  395. return specific_id;
  396. }
  397. return MakeSpecific(eval_context.context(), eval_context.fallback_loc(),
  398. specific.generic_id, args_id);
  399. }
  400. // Like `GetConstantValue` but does a `FacetTypeId` -> `FacetTypeInfo`
  401. // conversion. Does not perform canonicalization.
  402. static auto GetConstantFacetTypeInfo(EvalContext& eval_context,
  403. SemIR::FacetTypeId facet_type_id,
  404. Phase* phase) -> SemIR::FacetTypeInfo {
  405. SemIR::FacetTypeInfo info = eval_context.facet_types().Get(facet_type_id);
  406. for (auto& interface : info.impls_constraints) {
  407. interface.specific_id =
  408. GetConstantValue(eval_context, interface.specific_id, phase);
  409. }
  410. for (auto& rewrite : info.rewrite_constraints) {
  411. rewrite.lhs_const_id = eval_context.GetInContext(rewrite.lhs_const_id);
  412. rewrite.rhs_const_id = eval_context.GetInContext(rewrite.rhs_const_id);
  413. // `where` requirements using `.Self` should not be considered symbolic
  414. UpdatePhaseIgnorePeriodSelf(eval_context, rewrite.lhs_const_id, phase);
  415. UpdatePhaseIgnorePeriodSelf(eval_context, rewrite.rhs_const_id, phase);
  416. }
  417. // TODO: Process other requirements.
  418. return info;
  419. }
  420. // Replaces the specified field of the given typed instruction with its constant
  421. // value, if it has constant phase. Returns true on success, false if the value
  422. // has runtime phase.
  423. template <typename InstT, typename FieldIdT>
  424. static auto ReplaceFieldWithConstantValue(EvalContext& eval_context,
  425. InstT* inst, FieldIdT InstT::*field,
  426. Phase* phase) -> bool {
  427. auto unwrapped = GetConstantValue(eval_context, inst->*field, phase);
  428. if (!unwrapped.has_value() && (inst->*field).has_value()) {
  429. return false;
  430. }
  431. inst->*field = unwrapped;
  432. return true;
  433. }
// If the specified fields of the given typed instruction have constant values,
// replaces the fields with their constant values and builds a corresponding
// constant value. Otherwise returns `ConstantId::NotConstant`. Returns
// `ErrorInst::SingletonConstantId` if any subexpression is an error.
//
// The constant value is then checked by calling `validate_fn(typed_inst)`,
// which should return a `bool` indicating whether the new constant is valid. If
// validation passes, `transform_fn(typed_inst)` is called to produce the final
// constant instruction, and a corresponding ConstantId for the new constant is
// returned. If validation fails, it should produce a suitable error message.
// `ErrorInst::SingletonConstantId` is returned.
template <typename InstT, typename ValidateFn, typename TransformFn,
          typename... EachFieldIdT>
static auto RebuildIfFieldsAreConstantImpl(
    EvalContext& eval_context, SemIR::Inst inst, ValidateFn validate_fn,
    TransformFn transform_fn, EachFieldIdT InstT::*... each_field_id)
    -> SemIR::ConstantId {
  // Build a constant instruction by replacing each non-constant operand with
  // its constant value.
  auto typed_inst = inst.As<InstT>();
  Phase phase = Phase::Template;
  // The `&&` fold expression short-circuits: fields after the first
  // non-constant one are left unreplaced, and `phase` still reflects every
  // field visited so far.
  if ((ReplaceFieldWithConstantValue(eval_context, &typed_inst, each_field_id,
                                     &phase) &&
       ...)) {
    if (phase == Phase::UnknownDueToError || !validate_fn(typed_inst)) {
      return SemIR::ErrorInst::SingletonConstantId;
    }
    return MakeConstantResult(eval_context.context(), transform_fn(typed_inst),
                              phase);
  }
  return MakeNonConstantResult(phase);
}
// Same as `RebuildIfFieldsAreConstantImpl`, but with an identity transform
// function: the typed instruction with constant fields is itself the constant
// instruction.
template <typename InstT, typename ValidateFn, typename... EachFieldIdT>
static auto RebuildAndValidateIfFieldsAreConstant(
    EvalContext& eval_context, SemIR::Inst inst, ValidateFn validate_fn,
    EachFieldIdT InstT::*... each_field_id) -> SemIR::ConstantId {
  return RebuildIfFieldsAreConstantImpl(eval_context, inst, validate_fn,
                                        std::identity{}, each_field_id...);
}
// Same as `RebuildIfFieldsAreConstantImpl`, but with no validation step: every
// rebuilt instruction is considered valid.
template <typename InstT, typename TransformFn, typename... EachFieldIdT>
static auto TransformIfFieldsAreConstant(EvalContext& eval_context,
                                         SemIR::Inst inst,
                                         TransformFn transform_fn,
                                         EachFieldIdT InstT::*... each_field_id)
    -> SemIR::ConstantId {
  return RebuildIfFieldsAreConstantImpl(
      eval_context, inst, [](...) { return true; }, transform_fn,
      each_field_id...);
}
// Same as `RebuildIfFieldsAreConstantImpl`, but with no validation or
// transform step: the typed instruction with constant fields is used directly.
template <typename InstT, typename... EachFieldIdT>
static auto RebuildIfFieldsAreConstant(EvalContext& eval_context,
                                       SemIR::Inst inst,
                                       EachFieldIdT InstT::*... each_field_id)
    -> SemIR::ConstantId {
  return RebuildIfFieldsAreConstantImpl(
      eval_context, inst, [](...) { return true; }, std::identity{},
      each_field_id...);
}
// Rebuilds the given aggregate initialization instruction as a corresponding
// constant aggregate value, if its elements are all constants. `value_kind` is
// the instruction kind of the aggregate value to produce.
static auto RebuildInitAsValue(EvalContext& eval_context, SemIR::Inst inst,
                               SemIR::InstKind value_kind)
    -> SemIR::ConstantId {
  return TransformIfFieldsAreConstant(
      eval_context, inst,
      // Reuse the init instruction's type and (now-constant) element block for
      // the value form.
      [&](SemIR::AnyAggregateInit result) {
        return SemIR::AnyAggregateValue{.kind = value_kind,
                                        .type_id = result.type_id,
                                        .elements_id = result.elements_id};
      },
      &SemIR::AnyAggregateInit::type_id, &SemIR::AnyAggregateInit::elements_id);
}
// Performs an access into an aggregate, retrieving the specified element.
static auto PerformAggregateAccess(EvalContext& eval_context, SemIR::Inst inst)
    -> SemIR::ConstantId {
  auto access_inst = inst.As<SemIR::AnyAggregateAccess>();
  Phase phase = Phase::Template;
  // Try to replace the aggregate operand with its constant value; `phase` is
  // updated to reflect the operand's constant phase.
  if (ReplaceFieldWithConstantValue(eval_context, &access_inst,
                                    &SemIR::AnyAggregateAccess::aggregate_id,
                                    &phase)) {
    // If the constant aggregate has a concrete value representation, pick out
    // the requested element directly.
    if (auto aggregate =
            eval_context.insts().TryGetAs<SemIR::AnyAggregateValue>(
                access_inst.aggregate_id)) {
      auto elements = eval_context.inst_blocks().Get(aggregate->elements_id);
      auto index = static_cast<size_t>(access_inst.index.index);
      CARBON_CHECK(index < elements.size(), "Access out of bounds.");
      // `Phase` is not used here. If this element is a template constant, then
      // so is the result of indexing, even if the aggregate also contains a
      // symbolic context.
      return eval_context.GetConstantValue(elements[index]);
    } else {
      // A template constant aggregate should always have been evaluated to an
      // aggregate value, so only symbolic constants can reach this branch.
      CARBON_CHECK(phase != Phase::Template,
                   "Failed to evaluate template constant {0} arg0: {1}", inst,
                   eval_context.insts().Get(access_inst.aggregate_id));
    }
    // Symbolic aggregate: the access instruction itself is the constant.
    return MakeConstantResult(eval_context.context(), access_inst, phase);
  }
  // The aggregate operand is not constant.
  return MakeNonConstantResult(phase);
}
// Performs an index into a homogeneous aggregate, retrieving the specified
// element.
static auto PerformArrayIndex(EvalContext& eval_context, SemIR::ArrayIndex inst)
    -> SemIR::ConstantId {
  Phase phase = Phase::Template;
  auto index_id = GetConstantValue(eval_context, inst.index_id, &phase);
  // A non-constant index makes the whole operation non-constant.
  if (!index_id.has_value()) {
    return MakeNonConstantResult(phase);
  }
  auto index = eval_context.insts().TryGetAs<SemIR::IntValue>(index_id);
  if (!index) {
    // Only a symbolic index can fail to be an integer literal here.
    CARBON_CHECK(phase != Phase::Template,
                 "Template constant integer should be a literal");
    return MakeNonConstantResult(phase);
  }
  // Array indexing is invalid if the index is constant and out of range,
  // regardless of whether the array itself is constant.
  const auto& index_val = eval_context.ints().Get(index->int_id);
  auto aggregate_type_id = eval_context.GetConstantValueAsType(
      eval_context.insts().Get(inst.array_id).type_id());
  if (auto array_type =
          eval_context.types().TryGetAs<SemIR::ArrayType>(aggregate_type_id)) {
    // Only check the bound when it is itself a constant integer; a symbolic
    // bound can't be range-checked here.
    if (auto bound = eval_context.insts().TryGetAs<SemIR::IntValue>(
            array_type->bound_id)) {
      // This awkward call to `getZExtValue` is a workaround for APInt not
      // supporting comparisons between integers of different bit widths.
      if (index_val.getActiveBits() > 64 ||
          eval_context.ints()
              .Get(bound->int_id)
              .ule(index_val.getZExtValue())) {
        CARBON_DIAGNOSTIC(ArrayIndexOutOfBounds, Error,
                          "array index `{0}` is past the end of type {1}",
                          TypedInt, SemIR::TypeId);
        eval_context.emitter().Emit(
            eval_context.GetDiagnosticLoc(inst.index_id), ArrayIndexOutOfBounds,
            {.type = index->type_id, .value = index_val}, aggregate_type_id);
        return SemIR::ErrorInst::SingletonConstantId;
      }
    }
  }
  // Bounds checking is done; now evaluate the array operand itself.
  auto aggregate_id = GetConstantValue(eval_context, inst.array_id, &phase);
  if (!aggregate_id.has_value()) {
    return MakeNonConstantResult(phase);
  }
  auto aggregate =
      eval_context.insts().TryGetAs<SemIR::AnyAggregateValue>(aggregate_id);
  if (!aggregate) {
    // Template constant aggregates are expected to be aggregate values.
    CARBON_CHECK(phase != Phase::Template,
                 "Unexpected representation for template constant aggregate");
    return MakeNonConstantResult(phase);
  }
  // The index was validated above, so select the element's constant value.
  auto elements = eval_context.inst_blocks().Get(aggregate->elements_id);
  return eval_context.GetConstantValue(elements[index_val.getZExtValue()]);
}
// Enforces that an integer type has a valid bit width. Returns false (after
// diagnosing) if the width is known to be invalid.
static auto ValidateIntType(Context& context, SemIRLoc loc,
                            SemIR::IntType result) -> bool {
  auto bit_width =
      context.insts().TryGetAs<SemIR::IntValue>(result.bit_width_id);
  if (!bit_width) {
    // Symbolic bit width.
    return true;
  }
  const auto& bit_width_val = context.ints().Get(bit_width->int_id);
  // Reject a zero width, and a negative width when the width's own type is
  // signed (an unsigned width type can't be negative).
  if (bit_width_val.isZero() ||
      (context.types().IsSignedInt(bit_width->type_id) &&
       bit_width_val.isNegative())) {
    CARBON_DIAGNOSTIC(IntWidthNotPositive, Error,
                      "integer type width of {0} is not positive", TypedInt);
    context.emitter().Emit(
        loc, IntWidthNotPositive,
        {.type = bit_width->type_id, .value = bit_width_val});
    return false;
  }
  // Cap the width at what the toolchain's int store can represent.
  if (bit_width_val.ugt(IntStore::MaxIntWidth)) {
    CARBON_DIAGNOSTIC(IntWidthTooLarge, Error,
                      "integer type width of {0} is greater than the "
                      "maximum supported width of {1}",
                      TypedInt, int);
    context.emitter().Emit(loc, IntWidthTooLarge,
                           {.type = bit_width->type_id, .value = bit_width_val},
                           IntStore::MaxIntWidth);
    return false;
  }
  return true;
}
  622. // Forms a constant int type as an evaluation result. Requires that width_id is
  623. // constant.
  624. static auto MakeIntTypeResult(Context& context, SemIRLoc loc,
  625. SemIR::IntKind int_kind, SemIR::InstId width_id,
  626. Phase phase) -> SemIR::ConstantId {
  627. auto result = SemIR::IntType{
  628. .type_id = context.GetSingletonType(SemIR::TypeType::SingletonInstId),
  629. .int_kind = int_kind,
  630. .bit_width_id = width_id};
  631. if (!ValidateIntType(context, loc, result)) {
  632. return SemIR::ErrorInst::SingletonConstantId;
  633. }
  634. return MakeConstantResult(context, result, phase);
  635. }
  636. // Enforces that the bit width is 64 for a float.
  637. static auto ValidateFloatBitWidth(Context& context, SemIRLoc loc,
  638. SemIR::InstId inst_id) -> bool {
  639. auto inst = context.insts().GetAs<SemIR::IntValue>(inst_id);
  640. if (context.ints().Get(inst.int_id) == 64) {
  641. return true;
  642. }
  643. CARBON_DIAGNOSTIC(CompileTimeFloatBitWidth, Error, "bit width must be 64");
  644. context.emitter().Emit(loc, CompileTimeFloatBitWidth);
  645. return false;
  646. }
  647. // Enforces that a float type has a valid bit width.
  648. static auto ValidateFloatType(Context& context, SemIRLoc loc,
  649. SemIR::FloatType result) -> bool {
  650. auto bit_width =
  651. context.insts().TryGetAs<SemIR::IntValue>(result.bit_width_id);
  652. if (!bit_width) {
  653. // Symbolic bit width.
  654. return true;
  655. }
  656. return ValidateFloatBitWidth(context, loc, result.bit_width_id);
  657. }
  658. // Performs a conversion between integer types, truncating if the value doesn't
  659. // fit in the destination type.
  660. static auto PerformIntConvert(Context& context, SemIR::InstId arg_id,
  661. SemIR::TypeId dest_type_id) -> SemIR::ConstantId {
  662. auto arg_val =
  663. context.ints().Get(context.insts().GetAs<SemIR::IntValue>(arg_id).int_id);
  664. auto [dest_is_signed, bit_width_id] =
  665. context.sem_ir().types().GetIntTypeInfo(dest_type_id);
  666. if (bit_width_id.has_value()) {
  667. // TODO: If the value fits in the destination type, reuse the existing
  668. // int_id rather than recomputing it. This is probably the most common case.
  669. bool src_is_signed = context.sem_ir().types().IsSignedInt(
  670. context.insts().Get(arg_id).type_id());
  671. unsigned width = context.ints().Get(bit_width_id).getZExtValue();
  672. arg_val =
  673. src_is_signed ? arg_val.sextOrTrunc(width) : arg_val.zextOrTrunc(width);
  674. }
  675. return MakeIntResult(context, dest_type_id, dest_is_signed, arg_val);
  676. }
// Performs a conversion between integer types, diagnosing if the value doesn't
// fit in the destination type. The diagnostics are non-fatal: the original
// int value is reused unchanged in the result either way.
static auto PerformCheckedIntConvert(Context& context, SemIRLoc loc,
                                     SemIR::InstId arg_id,
                                     SemIR::TypeId dest_type_id)
    -> SemIR::ConstantId {
  auto arg = context.insts().GetAs<SemIR::IntValue>(arg_id);
  auto arg_val = context.ints().Get(arg.int_id);
  auto [is_signed, bit_width_id] =
      context.sem_ir().types().GetIntTypeInfo(dest_type_id);
  // An unsized destination (integer literal type) is treated as being as wide
  // as the source value.
  auto width = bit_width_id.has_value()
                   ? context.ints().Get(bit_width_id).getZExtValue()
                   : arg_val.getBitWidth();
  if (!is_signed && arg_val.isNegative()) {
    CARBON_DIAGNOSTIC(
        NegativeIntInUnsignedType, Error,
        "negative integer value {0} converted to unsigned type {1}", TypedInt,
        SemIR::TypeId);
    context.emitter().Emit(loc, NegativeIntInUnsignedType,
                           {.type = arg.type_id, .value = arg_val},
                           dest_type_id);
  }
  // Bits needed for the magnitude, excluding the sign bit; a signed
  // destination needs one extra bit of room for the sign.
  unsigned arg_non_sign_bits = arg_val.getSignificantBits() - 1;
  if (arg_non_sign_bits + is_signed > width) {
    CARBON_DIAGNOSTIC(IntTooLargeForType, Error,
                      "integer value {0} too large for type {1}", TypedInt,
                      SemIR::TypeId);
    context.emitter().Emit(loc, IntTooLargeForType,
                           {.type = arg.type_id, .value = arg_val},
                           dest_type_id);
  }
  return MakeConstantResult(
      context, SemIR::IntValue{.type_id = dest_type_id, .int_id = arg.int_id},
      Phase::Template);
}
// Issues a diagnostic for a compile-time division by zero. Shared by the
// signed and unsigned integer division and modulo builtins.
static auto DiagnoseDivisionByZero(Context& context, SemIRLoc loc) -> void {
  CARBON_DIAGNOSTIC(CompileTimeDivisionByZero, Error, "division by zero");
  context.emitter().Emit(loc, CompileTimeDivisionByZero);
}
  717. // Get an integer at a suitable bit-width: either `bit_width_id` if it has a
  718. // value, or the canonical width from the value store if not.
  719. static auto GetIntAtSuitableWidth(Context& context, IntId int_id,
  720. IntId bit_width_id) -> llvm::APInt {
  721. return bit_width_id.has_value()
  722. ? context.ints().GetAtWidth(int_id, bit_width_id)
  723. : context.ints().Get(int_id);
  724. }
// Performs a builtin unary integer -> integer operation.
static auto PerformBuiltinUnaryIntOp(Context& context, SemIRLoc loc,
                                     SemIR::BuiltinFunctionKind builtin_kind,
                                     SemIR::InstId arg_id)
    -> SemIR::ConstantId {
  auto op = context.insts().GetAs<SemIR::IntValue>(arg_id);
  auto [is_signed, bit_width_id] =
      context.sem_ir().types().GetIntTypeInfo(op.type_id);
  llvm::APInt op_val = GetIntAtSuitableWidth(context, op.int_id, bit_width_id);
  switch (builtin_kind) {
    case SemIR::BuiltinFunctionKind::IntSNegate:
      // Negating the minimum signed value overflows for a fixed-width type;
      // diagnose it, but still fall through and produce the (wrapped) result.
      // For unsized operands, widen instead so the result is representable.
      if (op_val.isMinSignedValue()) {
        if (bit_width_id.has_value()) {
          CARBON_DIAGNOSTIC(CompileTimeIntegerNegateOverflow, Error,
                            "integer overflow in negation of {0}", TypedInt);
          context.emitter().Emit(loc, CompileTimeIntegerNegateOverflow,
                                 {.type = op.type_id, .value = op_val});
        } else {
          // Widen the integer so we don't overflow into the sign bit.
          op_val = op_val.sext(op_val.getBitWidth() +
                               llvm::APInt::APINT_BITS_PER_WORD);
        }
      }
      op_val.negate();
      break;
    case SemIR::BuiltinFunctionKind::IntUNegate:
      CARBON_CHECK(bit_width_id.has_value(), "Unsigned negate on unsized int");
      op_val.negate();
      break;
    case SemIR::BuiltinFunctionKind::IntComplement:
      // TODO: Should we have separate builtins for signed and unsigned
      // complement? Like with signed/unsigned negate, these operations do
      // different things to the integer value, even though they do the same
      // thing to the bits. We treat IntLiteral complement as signed complement,
      // given that the result of unsigned complement depends on the bit width.
      op_val.flipAllBits();
      break;
    default:
      CARBON_FATAL("Unexpected builtin kind");
  }
  return MakeIntResult(context, op.type_id, is_signed, std::move(op_val));
}
namespace {
// A pair of APInts that are the operands of a binary operator. We use an
// aggregate rather than `std::pair` to allow RVO of the individual ints.
struct APIntBinaryOperands {
  // Left-hand operand.
  llvm::APInt lhs;
  // Right-hand operand.
  llvm::APInt rhs;
};
}  // namespace
  775. // Get a pair of integers at the same suitable bit-width: either their actual
  776. // width if they have a fixed width, or the smallest canonical width in which
  777. // they both fit otherwise.
  778. static auto GetIntsAtSuitableWidth(Context& context, IntId lhs_id, IntId rhs_id,
  779. IntId bit_width_id) -> APIntBinaryOperands {
  780. // Unsized operands: take the wider of the bit widths.
  781. if (!bit_width_id.has_value()) {
  782. APIntBinaryOperands result = {.lhs = context.ints().Get(lhs_id),
  783. .rhs = context.ints().Get(rhs_id)};
  784. if (result.lhs.getBitWidth() != result.rhs.getBitWidth()) {
  785. if (result.lhs.getBitWidth() > result.rhs.getBitWidth()) {
  786. result.rhs = result.rhs.sext(result.lhs.getBitWidth());
  787. } else {
  788. result.lhs = result.lhs.sext(result.rhs.getBitWidth());
  789. }
  790. }
  791. return result;
  792. }
  793. return {.lhs = context.ints().GetAtWidth(lhs_id, bit_width_id),
  794. .rhs = context.ints().GetAtWidth(rhs_id, bit_width_id)};
  795. }
namespace {
// The result of performing a binary int operation.
struct BinaryIntOpResult {
  // The computed value.
  llvm::APInt result_val;
  // Whether the computation overflowed. Only set by operations that detect
  // overflow; unsigned wrapping arithmetic leaves this false.
  bool overflow;
  // The operator token used to describe the operation in diagnostics.
  Lex::TokenKind op_token;
};
}  // namespace
// Computes the result of a homogeneous binary (int, int) -> int operation.
// The operands must already be at the same bit width. Signed operations
// report overflow via the `overflow` flag; unsigned arithmetic wraps modulo
// 2^width and never sets it.
static auto ComputeBinaryIntOpResult(SemIR::BuiltinFunctionKind builtin_kind,
                                     const llvm::APInt& lhs_val,
                                     const llvm::APInt& rhs_val)
    -> BinaryIntOpResult {
  llvm::APInt result_val;
  bool overflow = false;
  // Placeholder; every reachable case below overwrites this.
  Lex::TokenKind op_token = Lex::TokenKind::Not;
  switch (builtin_kind) {
    // Arithmetic.
    case SemIR::BuiltinFunctionKind::IntSAdd:
      result_val = lhs_val.sadd_ov(rhs_val, overflow);
      op_token = Lex::TokenKind::Plus;
      break;
    case SemIR::BuiltinFunctionKind::IntSSub:
      result_val = lhs_val.ssub_ov(rhs_val, overflow);
      op_token = Lex::TokenKind::Minus;
      break;
    case SemIR::BuiltinFunctionKind::IntSMul:
      result_val = lhs_val.smul_ov(rhs_val, overflow);
      op_token = Lex::TokenKind::Star;
      break;
    case SemIR::BuiltinFunctionKind::IntSDiv:
      result_val = lhs_val.sdiv_ov(rhs_val, overflow);
      op_token = Lex::TokenKind::Slash;
      break;
    case SemIR::BuiltinFunctionKind::IntSMod:
      result_val = lhs_val.srem(rhs_val);
      // LLVM weirdly lacks `srem_ov`, so we work it out for ourselves:
      // <signed min> % -1 overflows because <signed min> / -1 overflows.
      overflow = lhs_val.isMinSignedValue() && rhs_val.isAllOnes();
      op_token = Lex::TokenKind::Percent;
      break;
    case SemIR::BuiltinFunctionKind::IntUAdd:
      result_val = lhs_val + rhs_val;
      op_token = Lex::TokenKind::Plus;
      break;
    case SemIR::BuiltinFunctionKind::IntUSub:
      result_val = lhs_val - rhs_val;
      op_token = Lex::TokenKind::Minus;
      break;
    case SemIR::BuiltinFunctionKind::IntUMul:
      result_val = lhs_val * rhs_val;
      op_token = Lex::TokenKind::Star;
      break;
    case SemIR::BuiltinFunctionKind::IntUDiv:
      result_val = lhs_val.udiv(rhs_val);
      op_token = Lex::TokenKind::Slash;
      break;
    case SemIR::BuiltinFunctionKind::IntUMod:
      result_val = lhs_val.urem(rhs_val);
      op_token = Lex::TokenKind::Percent;
      break;
    // Bitwise.
    case SemIR::BuiltinFunctionKind::IntAnd:
      result_val = lhs_val & rhs_val;
      op_token = Lex::TokenKind::And;
      break;
    case SemIR::BuiltinFunctionKind::IntOr:
      result_val = lhs_val | rhs_val;
      op_token = Lex::TokenKind::Pipe;
      break;
    case SemIR::BuiltinFunctionKind::IntXor:
      result_val = lhs_val ^ rhs_val;
      op_token = Lex::TokenKind::Caret;
      break;
    case SemIR::BuiltinFunctionKind::IntLeftShift:
    case SemIR::BuiltinFunctionKind::IntRightShift:
      CARBON_FATAL("Non-homogeneous operation handled separately.");
    default:
      CARBON_FATAL("Unexpected operation kind.");
  }
  return {.result_val = std::move(result_val),
          .overflow = overflow,
          .op_token = op_token};
}
// Performs a builtin integer bit shift operation. The shift distance (rhs) is
// validated before the shift: for sized types it must be less than the type
// width, and for signed distance types it must be non-negative.
static auto PerformBuiltinIntShiftOp(Context& context, SemIRLoc loc,
                                     SemIR::BuiltinFunctionKind builtin_kind,
                                     SemIR::InstId lhs_id, SemIR::InstId rhs_id)
    -> SemIR::ConstantId {
  auto lhs = context.insts().GetAs<SemIR::IntValue>(lhs_id);
  auto rhs = context.insts().GetAs<SemIR::IntValue>(rhs_id);
  auto [lhs_is_signed, lhs_bit_width_id] =
      context.sem_ir().types().GetIntTypeInfo(lhs.type_id);
  llvm::APInt lhs_val =
      GetIntAtSuitableWidth(context, lhs.int_id, lhs_bit_width_id);
  const auto& rhs_orig_val = context.ints().Get(rhs.int_id);
  // For a sized left operand, a shift distance >= the type width is an error.
  if (lhs_bit_width_id.has_value() && rhs_orig_val.uge(lhs_val.getBitWidth())) {
    CARBON_DIAGNOSTIC(
        CompileTimeShiftOutOfRange, Error,
        "shift distance >= type width of {0} in `{1} {2:<<|>>} {3}`", unsigned,
        TypedInt, BoolAsSelect, TypedInt);
    context.emitter().Emit(
        loc, CompileTimeShiftOutOfRange, lhs_val.getBitWidth(),
        {.type = lhs.type_id, .value = lhs_val},
        builtin_kind == SemIR::BuiltinFunctionKind::IntLeftShift,
        {.type = rhs.type_id, .value = rhs_orig_val});
    // TODO: Is it useful to recover by returning 0 or -1?
    return SemIR::ErrorInst::SingletonConstantId;
  }
  // A negative shift distance is an error regardless of the operand's size.
  if (rhs_orig_val.isNegative() &&
      context.sem_ir().types().IsSignedInt(rhs.type_id)) {
    CARBON_DIAGNOSTIC(CompileTimeShiftNegative, Error,
                      "shift distance negative in `{0} {1:<<|>>} {2}`",
                      TypedInt, BoolAsSelect, TypedInt);
    context.emitter().Emit(
        loc, CompileTimeShiftNegative, {.type = lhs.type_id, .value = lhs_val},
        builtin_kind == SemIR::BuiltinFunctionKind::IntLeftShift,
        {.type = rhs.type_id, .value = rhs_orig_val});
    // TODO: Is it useful to recover by returning 0 or -1?
    return SemIR::ErrorInst::SingletonConstantId;
  }
  llvm::APInt result_val;
  if (builtin_kind == SemIR::BuiltinFunctionKind::IntLeftShift) {
    if (!lhs_bit_width_id.has_value() && !lhs_val.isZero()) {
      // Ensure we don't generate a ridiculously large integer through a bit
      // shift.
      auto width = rhs_orig_val.trySExtValue();
      if (!width ||
          *width > IntStore::MaxIntWidth - lhs_val.getSignificantBits()) {
        CARBON_DIAGNOSTIC(CompileTimeUnsizedShiftOutOfRange, Error,
                          "shift distance of {0} would result in an "
                          "integer whose width is greater than the "
                          "maximum supported width of {1}",
                          TypedInt, int);
        context.emitter().Emit(loc, CompileTimeUnsizedShiftOutOfRange,
                               {.type = rhs.type_id, .value = rhs_orig_val},
                               IntStore::MaxIntWidth);
        return SemIR::ErrorInst::SingletonConstantId;
      }
      // Widen the unsized operand so the shifted value fits without wrapping.
      lhs_val = lhs_val.sext(
          IntStore::CanonicalBitWidth(lhs_val.getSignificantBits() + *width));
    }
    result_val =
        lhs_val.shl(rhs_orig_val.getLimitedValue(lhs_val.getBitWidth()));
  } else if (lhs_is_signed) {
    // Right shift of a signed value is arithmetic (sign-propagating).
    result_val =
        lhs_val.ashr(rhs_orig_val.getLimitedValue(lhs_val.getBitWidth()));
  } else {
    CARBON_CHECK(lhs_bit_width_id.has_value(), "Logical shift on unsized int");
    result_val =
        lhs_val.lshr(rhs_orig_val.getLimitedValue(lhs_val.getBitWidth()));
  }
  return MakeIntResult(context, lhs.type_id, lhs_is_signed,
                       std::move(result_val));
}
// Performs a homogeneous builtin binary integer -> integer operation. Both
// operands must have the same type. Division by zero and overflow are
// diagnosed; overflow of an unsized (integer literal) operation is retried at
// a wider bit width before being treated as an error.
static auto PerformBuiltinBinaryIntOp(Context& context, SemIRLoc loc,
                                      SemIR::BuiltinFunctionKind builtin_kind,
                                      SemIR::InstId lhs_id,
                                      SemIR::InstId rhs_id)
    -> SemIR::ConstantId {
  auto lhs = context.insts().GetAs<SemIR::IntValue>(lhs_id);
  auto rhs = context.insts().GetAs<SemIR::IntValue>(rhs_id);
  CARBON_CHECK(rhs.type_id == lhs.type_id, "Heterogeneous builtin integer op!");
  auto type_id = lhs.type_id;
  auto [is_signed, bit_width_id] =
      context.sem_ir().types().GetIntTypeInfo(type_id);
  auto [lhs_val, rhs_val] =
      GetIntsAtSuitableWidth(context, lhs.int_id, rhs.int_id, bit_width_id);
  // Check for division by zero.
  switch (builtin_kind) {
    case SemIR::BuiltinFunctionKind::IntSDiv:
    case SemIR::BuiltinFunctionKind::IntSMod:
    case SemIR::BuiltinFunctionKind::IntUDiv:
    case SemIR::BuiltinFunctionKind::IntUMod:
      if (rhs_val.isZero()) {
        DiagnoseDivisionByZero(context, loc);
        return SemIR::ErrorInst::SingletonConstantId;
      }
      break;
    default:
      break;
  }
  BinaryIntOpResult result =
      ComputeBinaryIntOpResult(builtin_kind, lhs_val, rhs_val);
  if (result.overflow && !bit_width_id.has_value()) {
    // Retry with a larger bit width. Most operations can only overflow by one
    // bit, but signed n-bit multiplication can overflow to 2n-1 bits. We don't
    // need to handle unsigned multiplication here because it's not permitted
    // for unsized integers.
    //
    // Note that we speculatively first perform the calculation in the width of
    // the wider operand: smaller operations are faster and overflow to a wider
    // integer is unlikely to be needed, especially given that the width will
    // have been rounded up to a multiple of 64 bits by the int store.
    CARBON_CHECK(builtin_kind != SemIR::BuiltinFunctionKind::IntUMul,
                 "Unsigned arithmetic requires a fixed bitwidth");
    int new_width =
        builtin_kind == SemIR::BuiltinFunctionKind::IntSMul
            ? lhs_val.getBitWidth() * 2
            : IntStore::CanonicalBitWidth(lhs_val.getBitWidth() + 1);
    new_width = std::min(new_width, IntStore::MaxIntWidth);
    lhs_val = context.ints().GetAtWidth(lhs.int_id, new_width);
    rhs_val = context.ints().GetAtWidth(rhs.int_id, new_width);
    // Note that this can in theory still overflow if we limited `new_width` to
    // `MaxIntWidth`. In that case we fall through to the signed overflow
    // diagnostic below.
    result = ComputeBinaryIntOpResult(builtin_kind, lhs_val, rhs_val);
    CARBON_CHECK(!result.overflow || new_width == IntStore::MaxIntWidth);
  }
  // Overflow of a sized type is diagnosed but non-fatal: the wrapped result
  // value is still produced below.
  if (result.overflow) {
    CARBON_DIAGNOSTIC(CompileTimeIntegerOverflow, Error,
                      "integer overflow in calculation `{0} {1} {2}`", TypedInt,
                      Lex::TokenKind, TypedInt);
    context.emitter().Emit(loc, CompileTimeIntegerOverflow,
                           {.type = type_id, .value = lhs_val}, result.op_token,
                           {.type = type_id, .value = rhs_val});
  }
  return MakeIntResult(context, type_id, is_signed,
                       std::move(result.result_val));
}
  1017. // Performs a builtin integer comparison.
  1018. static auto PerformBuiltinIntComparison(Context& context,
  1019. SemIR::BuiltinFunctionKind builtin_kind,
  1020. SemIR::InstId lhs_id,
  1021. SemIR::InstId rhs_id,
  1022. SemIR::TypeId bool_type_id)
  1023. -> SemIR::ConstantId {
  1024. auto lhs = context.insts().GetAs<SemIR::IntValue>(lhs_id);
  1025. auto rhs = context.insts().GetAs<SemIR::IntValue>(rhs_id);
  1026. llvm::APInt lhs_val = context.ints().Get(lhs.int_id);
  1027. llvm::APInt rhs_val = context.ints().Get(rhs.int_id);
  1028. bool result;
  1029. switch (builtin_kind) {
  1030. case SemIR::BuiltinFunctionKind::IntEq:
  1031. result = (lhs_val == rhs_val);
  1032. break;
  1033. case SemIR::BuiltinFunctionKind::IntNeq:
  1034. result = (lhs_val != rhs_val);
  1035. break;
  1036. case SemIR::BuiltinFunctionKind::IntLess:
  1037. result = lhs_val.slt(rhs_val);
  1038. break;
  1039. case SemIR::BuiltinFunctionKind::IntLessEq:
  1040. result = lhs_val.sle(rhs_val);
  1041. break;
  1042. case SemIR::BuiltinFunctionKind::IntGreater:
  1043. result = lhs_val.sgt(rhs_val);
  1044. break;
  1045. case SemIR::BuiltinFunctionKind::IntGreaterEq:
  1046. result = lhs_val.sge(rhs_val);
  1047. break;
  1048. default:
  1049. CARBON_FATAL("Unexpected operation kind.");
  1050. }
  1051. return MakeBoolResult(context, bool_type_id, result);
  1052. }
  1053. // Performs a builtin unary float -> float operation.
  1054. static auto PerformBuiltinUnaryFloatOp(Context& context,
  1055. SemIR::BuiltinFunctionKind builtin_kind,
  1056. SemIR::InstId arg_id)
  1057. -> SemIR::ConstantId {
  1058. auto op = context.insts().GetAs<SemIR::FloatLiteral>(arg_id);
  1059. auto op_val = context.floats().Get(op.float_id);
  1060. switch (builtin_kind) {
  1061. case SemIR::BuiltinFunctionKind::FloatNegate:
  1062. op_val.changeSign();
  1063. break;
  1064. default:
  1065. CARBON_FATAL("Unexpected builtin kind");
  1066. }
  1067. return MakeFloatResult(context, op.type_id, std::move(op_val));
  1068. }
  1069. // Performs a builtin binary float -> float operation.
  1070. static auto PerformBuiltinBinaryFloatOp(Context& context,
  1071. SemIR::BuiltinFunctionKind builtin_kind,
  1072. SemIR::InstId lhs_id,
  1073. SemIR::InstId rhs_id)
  1074. -> SemIR::ConstantId {
  1075. auto lhs = context.insts().GetAs<SemIR::FloatLiteral>(lhs_id);
  1076. auto rhs = context.insts().GetAs<SemIR::FloatLiteral>(rhs_id);
  1077. auto lhs_val = context.floats().Get(lhs.float_id);
  1078. auto rhs_val = context.floats().Get(rhs.float_id);
  1079. llvm::APFloat result_val(lhs_val.getSemantics());
  1080. switch (builtin_kind) {
  1081. case SemIR::BuiltinFunctionKind::FloatAdd:
  1082. result_val = lhs_val + rhs_val;
  1083. break;
  1084. case SemIR::BuiltinFunctionKind::FloatSub:
  1085. result_val = lhs_val - rhs_val;
  1086. break;
  1087. case SemIR::BuiltinFunctionKind::FloatMul:
  1088. result_val = lhs_val * rhs_val;
  1089. break;
  1090. case SemIR::BuiltinFunctionKind::FloatDiv:
  1091. result_val = lhs_val / rhs_val;
  1092. break;
  1093. default:
  1094. CARBON_FATAL("Unexpected operation kind.");
  1095. }
  1096. return MakeFloatResult(context, lhs.type_id, std::move(result_val));
  1097. }
  1098. // Performs a builtin float comparison.
  1099. static auto PerformBuiltinFloatComparison(
  1100. Context& context, SemIR::BuiltinFunctionKind builtin_kind,
  1101. SemIR::InstId lhs_id, SemIR::InstId rhs_id, SemIR::TypeId bool_type_id)
  1102. -> SemIR::ConstantId {
  1103. auto lhs = context.insts().GetAs<SemIR::FloatLiteral>(lhs_id);
  1104. auto rhs = context.insts().GetAs<SemIR::FloatLiteral>(rhs_id);
  1105. const auto& lhs_val = context.floats().Get(lhs.float_id);
  1106. const auto& rhs_val = context.floats().Get(rhs.float_id);
  1107. bool result;
  1108. switch (builtin_kind) {
  1109. case SemIR::BuiltinFunctionKind::FloatEq:
  1110. result = (lhs_val == rhs_val);
  1111. break;
  1112. case SemIR::BuiltinFunctionKind::FloatNeq:
  1113. result = (lhs_val != rhs_val);
  1114. break;
  1115. case SemIR::BuiltinFunctionKind::FloatLess:
  1116. result = lhs_val < rhs_val;
  1117. break;
  1118. case SemIR::BuiltinFunctionKind::FloatLessEq:
  1119. result = lhs_val <= rhs_val;
  1120. break;
  1121. case SemIR::BuiltinFunctionKind::FloatGreater:
  1122. result = lhs_val > rhs_val;
  1123. break;
  1124. case SemIR::BuiltinFunctionKind::FloatGreaterEq:
  1125. result = lhs_val >= rhs_val;
  1126. break;
  1127. default:
  1128. CARBON_FATAL("Unexpected operation kind.");
  1129. }
  1130. return MakeBoolResult(context, bool_type_id, result);
  1131. }
  1132. // Performs a builtin boolean comparison.
  1133. static auto PerformBuiltinBoolComparison(
  1134. Context& context, SemIR::BuiltinFunctionKind builtin_kind,
  1135. SemIR::InstId lhs_id, SemIR::InstId rhs_id, SemIR::TypeId bool_type_id) {
  1136. bool lhs = context.insts().GetAs<SemIR::BoolLiteral>(lhs_id).value.ToBool();
  1137. bool rhs = context.insts().GetAs<SemIR::BoolLiteral>(rhs_id).value.ToBool();
  1138. return MakeBoolResult(context, bool_type_id,
  1139. builtin_kind == SemIR::BuiltinFunctionKind::BoolEq
  1140. ? lhs == rhs
  1141. : lhs != rhs);
  1142. }
  1143. // Returns a constant for a call to a builtin function.
  1144. static auto MakeConstantForBuiltinCall(Context& context, SemIRLoc loc,
  1145. SemIR::Call call,
  1146. SemIR::BuiltinFunctionKind builtin_kind,
  1147. llvm::ArrayRef<SemIR::InstId> arg_ids,
  1148. Phase phase) -> SemIR::ConstantId {
  1149. switch (builtin_kind) {
  1150. case SemIR::BuiltinFunctionKind::None:
  1151. CARBON_FATAL("Not a builtin function.");
  1152. case SemIR::BuiltinFunctionKind::PrintChar:
  1153. case SemIR::BuiltinFunctionKind::PrintInt:
  1154. case SemIR::BuiltinFunctionKind::ReadChar: {
  1155. // These are runtime-only builtins.
  1156. // TODO: Consider tracking this on the `BuiltinFunctionKind`.
  1157. return SemIR::ConstantId::NotConstant;
  1158. }
  1159. case SemIR::BuiltinFunctionKind::IntLiteralMakeType: {
  1160. return context.constant_values().Get(
  1161. SemIR::IntLiteralType::SingletonInstId);
  1162. }
  1163. case SemIR::BuiltinFunctionKind::IntMakeTypeSigned: {
  1164. return MakeIntTypeResult(context, loc, SemIR::IntKind::Signed, arg_ids[0],
  1165. phase);
  1166. }
  1167. case SemIR::BuiltinFunctionKind::IntMakeTypeUnsigned: {
  1168. return MakeIntTypeResult(context, loc, SemIR::IntKind::Unsigned,
  1169. arg_ids[0], phase);
  1170. }
  1171. case SemIR::BuiltinFunctionKind::FloatMakeType: {
  1172. // TODO: Support a symbolic constant width.
  1173. if (phase != Phase::Template) {
  1174. break;
  1175. }
  1176. if (!ValidateFloatBitWidth(context, loc, arg_ids[0])) {
  1177. return SemIR::ErrorInst::SingletonConstantId;
  1178. }
  1179. return context.constant_values().Get(
  1180. SemIR::LegacyFloatType::SingletonInstId);
  1181. }
  1182. case SemIR::BuiltinFunctionKind::BoolMakeType: {
  1183. return context.constant_values().Get(SemIR::BoolType::SingletonInstId);
  1184. }
  1185. // Integer conversions.
  1186. case SemIR::BuiltinFunctionKind::IntConvert: {
  1187. if (phase == Phase::Symbolic) {
  1188. return MakeConstantResult(context, call, phase);
  1189. }
  1190. return PerformIntConvert(context, arg_ids[0], call.type_id);
  1191. }
  1192. case SemIR::BuiltinFunctionKind::IntConvertChecked: {
  1193. if (phase == Phase::Symbolic) {
  1194. return MakeConstantResult(context, call, phase);
  1195. }
  1196. return PerformCheckedIntConvert(context, loc, arg_ids[0], call.type_id);
  1197. }
  1198. // Unary integer -> integer operations.
  1199. case SemIR::BuiltinFunctionKind::IntSNegate:
  1200. case SemIR::BuiltinFunctionKind::IntUNegate:
  1201. case SemIR::BuiltinFunctionKind::IntComplement: {
  1202. if (phase != Phase::Template) {
  1203. break;
  1204. }
  1205. return PerformBuiltinUnaryIntOp(context, loc, builtin_kind, arg_ids[0]);
  1206. }
  1207. // Homogeneous binary integer -> integer operations.
  1208. case SemIR::BuiltinFunctionKind::IntSAdd:
  1209. case SemIR::BuiltinFunctionKind::IntSSub:
  1210. case SemIR::BuiltinFunctionKind::IntSMul:
  1211. case SemIR::BuiltinFunctionKind::IntSDiv:
  1212. case SemIR::BuiltinFunctionKind::IntSMod:
  1213. case SemIR::BuiltinFunctionKind::IntUAdd:
  1214. case SemIR::BuiltinFunctionKind::IntUSub:
  1215. case SemIR::BuiltinFunctionKind::IntUMul:
  1216. case SemIR::BuiltinFunctionKind::IntUDiv:
  1217. case SemIR::BuiltinFunctionKind::IntUMod:
  1218. case SemIR::BuiltinFunctionKind::IntAnd:
  1219. case SemIR::BuiltinFunctionKind::IntOr:
  1220. case SemIR::BuiltinFunctionKind::IntXor: {
  1221. if (phase != Phase::Template) {
  1222. break;
  1223. }
  1224. return PerformBuiltinBinaryIntOp(context, loc, builtin_kind, arg_ids[0],
  1225. arg_ids[1]);
  1226. }
  1227. // Bit shift operations.
  1228. case SemIR::BuiltinFunctionKind::IntLeftShift:
  1229. case SemIR::BuiltinFunctionKind::IntRightShift: {
  1230. if (phase != Phase::Template) {
  1231. break;
  1232. }
  1233. return PerformBuiltinIntShiftOp(context, loc, builtin_kind, arg_ids[0],
  1234. arg_ids[1]);
  1235. }
  1236. // Integer comparisons.
  1237. case SemIR::BuiltinFunctionKind::IntEq:
  1238. case SemIR::BuiltinFunctionKind::IntNeq:
  1239. case SemIR::BuiltinFunctionKind::IntLess:
  1240. case SemIR::BuiltinFunctionKind::IntLessEq:
  1241. case SemIR::BuiltinFunctionKind::IntGreater:
  1242. case SemIR::BuiltinFunctionKind::IntGreaterEq: {
  1243. if (phase != Phase::Template) {
  1244. break;
  1245. }
  1246. return PerformBuiltinIntComparison(context, builtin_kind, arg_ids[0],
  1247. arg_ids[1], call.type_id);
  1248. }
  1249. // Unary float -> float operations.
  1250. case SemIR::BuiltinFunctionKind::FloatNegate: {
  1251. if (phase != Phase::Template) {
  1252. break;
  1253. }
  1254. return PerformBuiltinUnaryFloatOp(context, builtin_kind, arg_ids[0]);
  1255. }
  1256. // Binary float -> float operations.
  1257. case SemIR::BuiltinFunctionKind::FloatAdd:
  1258. case SemIR::BuiltinFunctionKind::FloatSub:
  1259. case SemIR::BuiltinFunctionKind::FloatMul:
  1260. case SemIR::BuiltinFunctionKind::FloatDiv: {
  1261. if (phase != Phase::Template) {
  1262. break;
  1263. }
  1264. return PerformBuiltinBinaryFloatOp(context, builtin_kind, arg_ids[0],
  1265. arg_ids[1]);
  1266. }
  1267. // Float comparisons.
  1268. case SemIR::BuiltinFunctionKind::FloatEq:
  1269. case SemIR::BuiltinFunctionKind::FloatNeq:
  1270. case SemIR::BuiltinFunctionKind::FloatLess:
  1271. case SemIR::BuiltinFunctionKind::FloatLessEq:
  1272. case SemIR::BuiltinFunctionKind::FloatGreater:
  1273. case SemIR::BuiltinFunctionKind::FloatGreaterEq: {
  1274. if (phase != Phase::Template) {
  1275. break;
  1276. }
  1277. return PerformBuiltinFloatComparison(context, builtin_kind, arg_ids[0],
  1278. arg_ids[1], call.type_id);
  1279. }
  1280. // Bool comparisons.
  1281. case SemIR::BuiltinFunctionKind::BoolEq:
  1282. case SemIR::BuiltinFunctionKind::BoolNeq: {
  1283. if (phase != Phase::Template) {
  1284. break;
  1285. }
  1286. return PerformBuiltinBoolComparison(context, builtin_kind, arg_ids[0],
  1287. arg_ids[1], call.type_id);
  1288. }
  1289. }
  1290. return SemIR::ConstantId::NotConstant;
  1291. }
// Makes a constant for a call instruction, if the call can be evaluated to a
// constant. Returns:
// - The evaluated constant when the callee is a builtin and all operands are
//   constant,
// - `ErrorInst::SingletonConstantId` when the call is erroneous or any operand
//   evaluated to an error,
// - `ConstantId::NotConstant` otherwise.
//
// `loc` is used for diagnostics; `call` is taken by value because its fields
// are replaced in place with their constant values during evaluation.
static auto MakeConstantForCall(EvalContext& eval_context, SemIRLoc loc,
                                SemIR::Call call) -> SemIR::ConstantId {
  Phase phase = Phase::Template;

  // A call with an invalid argument list is used to represent an erroneous
  // call.
  //
  // TODO: Use a better representation for this.
  if (call.args_id == SemIR::InstBlockId::None) {
    return SemIR::ErrorInst::SingletonConstantId;
  }

  // Find the constant value of the callee. This mutates `call.callee_id` and
  // folds the callee's phase into `phase`.
  bool has_constant_callee = ReplaceFieldWithConstantValue(
      eval_context, &call, &SemIR::Call::callee_id, &phase);
  auto callee_function =
      SemIR::GetCalleeFunction(eval_context.sem_ir(), call.callee_id);
  auto builtin_kind = SemIR::BuiltinFunctionKind::None;
  if (callee_function.function_id.has_value()) {
    // Calls to builtins might be constant.
    builtin_kind = eval_context.functions()
                       .Get(callee_function.function_id)
                       .builtin_function_kind;
    if (builtin_kind == SemIR::BuiltinFunctionKind::None) {
      // TODO: Eventually we'll want to treat some kinds of non-builtin
      // functions as producing constants.
      return SemIR::ConstantId::NotConstant;
    }
  } else {
    // Calls to non-functions, such as calls to generic entity names, might be
    // constant.
  }

  // Find the argument values and the return type. Note the short-circuit: the
  // type and argument fields are only replaced when the callee itself was
  // constant, and `args_id` is only replaced when `type_id` was also constant.
  bool has_constant_operands =
      has_constant_callee &&
      ReplaceFieldWithConstantValue(eval_context, &call, &SemIR::Call::type_id,
                                    &phase) &&
      ReplaceFieldWithConstantValue(eval_context, &call, &SemIR::Call::args_id,
                                    &phase);
  if (phase == Phase::UnknownDueToError) {
    return SemIR::ErrorInst::SingletonConstantId;
  }

  // If any operand of the call is non-constant, the call is non-constant.
  // TODO: Some builtin calls might allow some operands to be non-constant.
  if (!has_constant_operands) {
    // Diagnose a non-constant call to a function that can only be called at
    // compile time. NOTE(review): the note below reads
    // `callee_function.function_id`; this path presumably implies the callee
    // is a function with a valid id whenever `IsCompTimeOnly` returns true —
    // confirm that `IsCompTimeOnly` is false for `BuiltinFunctionKind::None`.
    if (builtin_kind.IsCompTimeOnly(
            eval_context.sem_ir(), eval_context.inst_blocks().Get(call.args_id),
            call.type_id)) {
      CARBON_DIAGNOSTIC(NonConstantCallToCompTimeOnlyFunction, Error,
                        "non-constant call to compile-time-only function");
      CARBON_DIAGNOSTIC(CompTimeOnlyFunctionHere, Note,
                        "compile-time-only function declared here");
      eval_context.emitter()
          .Build(loc, NonConstantCallToCompTimeOnlyFunction)
          .Note(eval_context.functions()
                    .Get(callee_function.function_id)
                    .latest_decl_id(),
                CompTimeOnlyFunctionHere)
          .Emit();
    }
    return SemIR::ConstantId::NotConstant;
  }

  // Handle calls to builtins: evaluate with the already-folded constant
  // arguments and the phase accumulated above.
  if (builtin_kind != SemIR::BuiltinFunctionKind::None) {
    return MakeConstantForBuiltinCall(
        eval_context.context(), loc, call, builtin_kind,
        eval_context.inst_blocks().Get(call.args_id), phase);
  }

  return SemIR::ConstantId::NotConstant;
}
  1361. // Creates a FacetType constant.
  1362. static auto MakeFacetTypeResult(Context& context,
  1363. const SemIR::FacetTypeInfo& info, Phase phase)
  1364. -> SemIR::ConstantId {
  1365. SemIR::FacetTypeId facet_type_id = context.facet_types().Add(info);
  1366. return MakeConstantResult(
  1367. context,
  1368. SemIR::FacetType{.type_id = SemIR::TypeType::SingletonTypeId,
  1369. .facet_type_id = facet_type_id},
  1370. phase);
  1371. }
  1372. // Implementation for `TryEvalInst`, wrapping `Context` with `EvalContext`.
  1373. //
  1374. // Tail call should not be diagnosed as recursion.
  1375. // https://github.com/llvm/llvm-project/issues/125724
  1376. // NOLINTNEXTLINE(misc-no-recursion): Tail call.
  1377. static auto TryEvalInstInContext(EvalContext& eval_context,
  1378. SemIR::InstId inst_id, SemIR::Inst inst)
  1379. -> SemIR::ConstantId {
  1380. // TODO: Ensure we have test coverage for each of these cases that can result
  1381. // in a constant, once those situations are all reachable.
  1382. CARBON_KIND_SWITCH(inst) {
  1383. // These cases are constants if their operands are.
  1384. case SemIR::AddrOf::Kind:
  1385. return RebuildIfFieldsAreConstant(eval_context, inst,
  1386. &SemIR::AddrOf::type_id,
  1387. &SemIR::AddrOf::lvalue_id);
  1388. case CARBON_KIND(SemIR::ArrayType array_type): {
  1389. return RebuildAndValidateIfFieldsAreConstant(
  1390. eval_context, inst,
  1391. [&](SemIR::ArrayType result) {
  1392. auto bound_id = array_type.bound_id;
  1393. auto bound_inst = eval_context.insts().Get(result.bound_id);
  1394. auto int_bound = bound_inst.TryAs<SemIR::IntValue>();
  1395. if (!int_bound) {
  1396. CARBON_CHECK(eval_context.constant_values()
  1397. .Get(result.bound_id)
  1398. .is_symbolic(),
  1399. "Unexpected inst {0} for template constant int",
  1400. bound_inst);
  1401. return true;
  1402. }
  1403. // TODO: We should check that the size of the resulting array type
  1404. // fits in 64 bits, not just that the bound does. Should we use a
  1405. // 32-bit limit for 32-bit targets?
  1406. const auto& bound_val = eval_context.ints().Get(int_bound->int_id);
  1407. if (eval_context.types().IsSignedInt(int_bound->type_id) &&
  1408. bound_val.isNegative()) {
  1409. CARBON_DIAGNOSTIC(ArrayBoundNegative, Error,
  1410. "array bound of {0} is negative", TypedInt);
  1411. eval_context.emitter().Emit(
  1412. eval_context.GetDiagnosticLoc(bound_id), ArrayBoundNegative,
  1413. {.type = int_bound->type_id, .value = bound_val});
  1414. return false;
  1415. }
  1416. if (bound_val.getActiveBits() > 64) {
  1417. CARBON_DIAGNOSTIC(ArrayBoundTooLarge, Error,
  1418. "array bound of {0} is too large", TypedInt);
  1419. eval_context.emitter().Emit(
  1420. eval_context.GetDiagnosticLoc(bound_id), ArrayBoundTooLarge,
  1421. {.type = int_bound->type_id, .value = bound_val});
  1422. return false;
  1423. }
  1424. return true;
  1425. },
  1426. &SemIR::ArrayType::bound_id, &SemIR::ArrayType::element_type_id);
  1427. }
  1428. case SemIR::AssociatedEntity::Kind:
  1429. return RebuildIfFieldsAreConstant(eval_context, inst,
  1430. &SemIR::AssociatedEntity::type_id);
  1431. case SemIR::AssociatedEntityType::Kind:
  1432. return RebuildIfFieldsAreConstant(
  1433. eval_context, inst, &SemIR::AssociatedEntityType::interface_type_id);
  1434. case SemIR::BoundMethod::Kind:
  1435. return RebuildIfFieldsAreConstant(eval_context, inst,
  1436. &SemIR::BoundMethod::type_id,
  1437. &SemIR::BoundMethod::object_id,
  1438. &SemIR::BoundMethod::function_decl_id);
  1439. case SemIR::ClassType::Kind:
  1440. return RebuildIfFieldsAreConstant(eval_context, inst,
  1441. &SemIR::ClassType::specific_id);
  1442. case SemIR::CompleteTypeWitness::Kind:
  1443. return RebuildIfFieldsAreConstant(
  1444. eval_context, inst, &SemIR::CompleteTypeWitness::object_repr_id);
  1445. case SemIR::FacetValue::Kind:
  1446. return RebuildIfFieldsAreConstant(eval_context, inst,
  1447. &SemIR::FacetValue::type_id,
  1448. &SemIR::FacetValue::type_inst_id,
  1449. &SemIR::FacetValue::witness_inst_id);
  1450. case SemIR::FunctionType::Kind:
  1451. return RebuildIfFieldsAreConstant(eval_context, inst,
  1452. &SemIR::FunctionType::specific_id);
  1453. case SemIR::FunctionTypeWithSelfType::Kind:
  1454. return RebuildIfFieldsAreConstant(
  1455. eval_context, inst,
  1456. &SemIR::FunctionTypeWithSelfType::interface_function_type_id,
  1457. &SemIR::FunctionTypeWithSelfType::self_id);
  1458. case SemIR::GenericClassType::Kind:
  1459. return RebuildIfFieldsAreConstant(
  1460. eval_context, inst, &SemIR::GenericClassType::enclosing_specific_id);
  1461. case SemIR::GenericInterfaceType::Kind:
  1462. return RebuildIfFieldsAreConstant(
  1463. eval_context, inst,
  1464. &SemIR::GenericInterfaceType::enclosing_specific_id);
  1465. case SemIR::ImplWitness::Kind:
  1466. // We intentionally don't replace the `elements_id` field here. We want to
  1467. // track that specific InstBlock in particular, not coalesce blocks with
  1468. // the same members. That block may get updated, and we want to pick up
  1469. // those changes.
  1470. return RebuildIfFieldsAreConstant(eval_context, inst,
  1471. &SemIR::ImplWitness::specific_id);
  1472. case CARBON_KIND(SemIR::IntType int_type): {
  1473. return RebuildAndValidateIfFieldsAreConstant(
  1474. eval_context, inst,
  1475. [&](SemIR::IntType result) {
  1476. return ValidateIntType(
  1477. eval_context.context(),
  1478. eval_context.GetDiagnosticLoc({inst_id, int_type.bit_width_id}),
  1479. result);
  1480. },
  1481. &SemIR::IntType::bit_width_id);
  1482. }
  1483. case SemIR::PointerType::Kind:
  1484. return RebuildIfFieldsAreConstant(eval_context, inst,
  1485. &SemIR::PointerType::pointee_id);
  1486. case CARBON_KIND(SemIR::FloatType float_type): {
  1487. return RebuildAndValidateIfFieldsAreConstant(
  1488. eval_context, inst,
  1489. [&](SemIR::FloatType result) {
  1490. return ValidateFloatType(eval_context.context(),
  1491. eval_context.GetDiagnosticLoc(
  1492. {inst_id, float_type.bit_width_id}),
  1493. result);
  1494. },
  1495. &SemIR::FloatType::bit_width_id);
  1496. }
  1497. case SemIR::SpecificFunction::Kind:
  1498. return RebuildIfFieldsAreConstant(eval_context, inst,
  1499. &SemIR::SpecificFunction::callee_id,
  1500. &SemIR::SpecificFunction::specific_id);
  1501. case SemIR::StructType::Kind:
  1502. return RebuildIfFieldsAreConstant(eval_context, inst,
  1503. &SemIR::StructType::fields_id);
  1504. case SemIR::StructValue::Kind:
  1505. return RebuildIfFieldsAreConstant(eval_context, inst,
  1506. &SemIR::StructValue::type_id,
  1507. &SemIR::StructValue::elements_id);
  1508. case SemIR::TupleType::Kind:
  1509. return RebuildIfFieldsAreConstant(eval_context, inst,
  1510. &SemIR::TupleType::elements_id);
  1511. case SemIR::TupleValue::Kind:
  1512. return RebuildIfFieldsAreConstant(eval_context, inst,
  1513. &SemIR::TupleValue::type_id,
  1514. &SemIR::TupleValue::elements_id);
  1515. case SemIR::UnboundElementType::Kind:
  1516. return RebuildIfFieldsAreConstant(
  1517. eval_context, inst, &SemIR::UnboundElementType::class_type_id,
  1518. &SemIR::UnboundElementType::element_type_id);
  1519. // Initializers evaluate to a value of the object representation.
  1520. case SemIR::ArrayInit::Kind:
  1521. // TODO: Add an `ArrayValue` to represent a constant array object
  1522. // representation instead of using a `TupleValue`.
  1523. return RebuildInitAsValue(eval_context, inst, SemIR::TupleValue::Kind);
  1524. case SemIR::ClassInit::Kind:
  1525. // TODO: Add a `ClassValue` to represent a constant class object
  1526. // representation instead of using a `StructValue`.
  1527. return RebuildInitAsValue(eval_context, inst, SemIR::StructValue::Kind);
  1528. case SemIR::StructInit::Kind:
  1529. return RebuildInitAsValue(eval_context, inst, SemIR::StructValue::Kind);
  1530. case SemIR::TupleInit::Kind:
  1531. return RebuildInitAsValue(eval_context, inst, SemIR::TupleValue::Kind);
  1532. case SemIR::Vtable::Kind:
  1533. return RebuildIfFieldsAreConstant(eval_context, inst,
  1534. &SemIR::Vtable::virtual_functions_id);
  1535. case SemIR::AutoType::Kind:
  1536. case SemIR::BoolType::Kind:
  1537. case SemIR::BoundMethodType::Kind:
  1538. case SemIR::ErrorInst::Kind:
  1539. case SemIR::IntLiteralType::Kind:
  1540. case SemIR::LegacyFloatType::Kind:
  1541. case SemIR::NamespaceType::Kind:
  1542. case SemIR::SpecificFunctionType::Kind:
  1543. case SemIR::StringType::Kind:
  1544. case SemIR::TypeType::Kind:
  1545. case SemIR::VtableType::Kind:
  1546. case SemIR::WitnessType::Kind:
  1547. // Builtins are always template constants.
  1548. return MakeConstantResult(eval_context.context(), inst, Phase::Template);
  1549. case CARBON_KIND(SemIR::FunctionDecl fn_decl): {
  1550. return TransformIfFieldsAreConstant(
  1551. eval_context, fn_decl,
  1552. [&](SemIR::FunctionDecl result) {
  1553. return SemIR::StructValue{.type_id = result.type_id,
  1554. .elements_id = SemIR::InstBlockId::Empty};
  1555. },
  1556. &SemIR::FunctionDecl::type_id);
  1557. }
  1558. case CARBON_KIND(SemIR::ClassDecl class_decl): {
  1559. // If the class has generic parameters, we don't produce a class type, but
  1560. // a callable whose return value is a class type.
  1561. if (eval_context.classes().Get(class_decl.class_id).has_parameters()) {
  1562. return TransformIfFieldsAreConstant(
  1563. eval_context, class_decl,
  1564. [&](SemIR::ClassDecl result) {
  1565. return SemIR::StructValue{
  1566. .type_id = result.type_id,
  1567. .elements_id = SemIR::InstBlockId::Empty};
  1568. },
  1569. &SemIR::ClassDecl::type_id);
  1570. }
  1571. // A non-generic class declaration evaluates to the class type.
  1572. return MakeConstantResult(
  1573. eval_context.context(),
  1574. SemIR::ClassType{.type_id = SemIR::TypeType::SingletonTypeId,
  1575. .class_id = class_decl.class_id,
  1576. .specific_id = SemIR::SpecificId::None},
  1577. Phase::Template);
  1578. }
  1579. case CARBON_KIND(SemIR::FacetType facet_type): {
  1580. Phase phase = Phase::Template;
  1581. SemIR::FacetTypeInfo info = GetConstantFacetTypeInfo(
  1582. eval_context, facet_type.facet_type_id, &phase);
  1583. info.Canonicalize();
  1584. // TODO: Reuse `inst` if we can detect that nothing has changed.
  1585. return MakeFacetTypeResult(eval_context.context(), info, phase);
  1586. }
  1587. case CARBON_KIND(SemIR::InterfaceDecl interface_decl): {
  1588. // If the interface has generic parameters, we don't produce an interface
  1589. // type, but a callable whose return value is an interface type.
  1590. if (eval_context.interfaces()
  1591. .Get(interface_decl.interface_id)
  1592. .has_parameters()) {
  1593. return TransformIfFieldsAreConstant(
  1594. eval_context, interface_decl,
  1595. [&](SemIR::InterfaceDecl result) {
  1596. return SemIR::StructValue{
  1597. .type_id = result.type_id,
  1598. .elements_id = SemIR::InstBlockId::Empty};
  1599. },
  1600. &SemIR::InterfaceDecl::type_id);
  1601. }
  1602. // A non-generic interface declaration evaluates to a facet type.
  1603. return MakeConstantResult(
  1604. eval_context.context(),
  1605. eval_context.context().FacetTypeFromInterface(
  1606. interface_decl.interface_id, SemIR::SpecificId::None),
  1607. Phase::Template);
  1608. }
  1609. case CARBON_KIND(SemIR::SpecificConstant specific): {
  1610. // Pull the constant value out of the specific.
  1611. return SemIR::GetConstantValueInSpecific(
  1612. eval_context.sem_ir(), specific.specific_id, specific.inst_id);
  1613. }
  1614. // These cases are treated as being the unique canonical definition of the
  1615. // corresponding constant value.
  1616. // TODO: This doesn't properly handle redeclarations. Consider adding a
  1617. // corresponding `Value` inst for each of these cases, or returning the
  1618. // first declaration.
  1619. case SemIR::AdaptDecl::Kind:
  1620. case SemIR::AssociatedConstantDecl::Kind:
  1621. case SemIR::BaseDecl::Kind:
  1622. case SemIR::FieldDecl::Kind:
  1623. case SemIR::ImplDecl::Kind:
  1624. case SemIR::Namespace::Kind:
  1625. return SemIR::ConstantId::ForTemplateConstant(inst_id);
  1626. case SemIR::BoolLiteral::Kind:
  1627. case SemIR::FloatLiteral::Kind:
  1628. case SemIR::IntValue::Kind:
  1629. case SemIR::StringLiteral::Kind:
  1630. // Promote literals to the constant block.
  1631. // TODO: Convert literals into a canonical form. Currently we can form two
  1632. // different `i32` constants with the same value if they are represented
  1633. // by `APInt`s with different bit widths.
  1634. // TODO: Can the type of an IntValue or FloatLiteral be symbolic? If so,
  1635. // we may need to rebuild.
  1636. return MakeConstantResult(eval_context.context(), inst, Phase::Template);
  1637. // The elements of a constant aggregate can be accessed.
  1638. case SemIR::ClassElementAccess::Kind:
  1639. case SemIR::StructAccess::Kind:
  1640. case SemIR::TupleAccess::Kind:
  1641. return PerformAggregateAccess(eval_context, inst);
  1642. case CARBON_KIND(SemIR::ImplWitnessAccess access_inst): {
  1643. // This is PerformAggregateAccess followed by GetConstantInSpecific.
  1644. Phase phase = Phase::Template;
  1645. if (ReplaceFieldWithConstantValue(eval_context, &access_inst,
  1646. &SemIR::ImplWitnessAccess::witness_id,
  1647. &phase)) {
  1648. if (auto witness = eval_context.insts().TryGetAs<SemIR::ImplWitness>(
  1649. access_inst.witness_id)) {
  1650. auto elements = eval_context.inst_blocks().Get(witness->elements_id);
  1651. auto index = static_cast<size_t>(access_inst.index.index);
  1652. CARBON_CHECK(index < elements.size(), "Access out of bounds.");
  1653. // `Phase` is not used here. If this element is a template constant,
  1654. // then so is the result of indexing, even if the aggregate also
  1655. // contains a symbolic context.
  1656. auto element = elements[index];
  1657. if (!element.has_value()) {
  1658. // TODO: Perhaps this should be a `{}` value with incomplete type?
  1659. CARBON_DIAGNOSTIC(ImplAccessMemberBeforeComplete, Error,
  1660. "accessing member from impl before the end of "
  1661. "its definition");
  1662. // TODO: Add note pointing to the impl declaration.
  1663. eval_context.emitter().Emit(eval_context.GetDiagnosticLoc(inst_id),
  1664. ImplAccessMemberBeforeComplete);
  1665. return SemIR::ErrorInst::SingletonConstantId;
  1666. }
  1667. LoadImportRef(eval_context.context(), element);
  1668. return GetConstantValueInSpecific(eval_context.sem_ir(),
  1669. witness->specific_id, element);
  1670. } else {
  1671. CARBON_CHECK(phase != Phase::Template,
  1672. "Failed to evaluate template constant {0} arg0: {1}",
  1673. inst, eval_context.insts().Get(access_inst.witness_id));
  1674. }
  1675. return MakeConstantResult(eval_context.context(), access_inst, phase);
  1676. }
  1677. return MakeNonConstantResult(phase);
  1678. }
  1679. case CARBON_KIND(SemIR::ArrayIndex index): {
  1680. return PerformArrayIndex(eval_context, index);
  1681. }
  1682. case CARBON_KIND(SemIR::Call call): {
  1683. return MakeConstantForCall(eval_context,
  1684. eval_context.GetDiagnosticLoc(inst_id), call);
  1685. }
  1686. // TODO: These need special handling.
  1687. case SemIR::BindValue::Kind:
  1688. case SemIR::Deref::Kind:
  1689. case SemIR::ImportRefLoaded::Kind:
  1690. case SemIR::ReturnSlot::Kind:
  1691. case SemIR::Temporary::Kind:
  1692. case SemIR::TemporaryStorage::Kind:
  1693. case SemIR::ValueAsRef::Kind:
  1694. case SemIR::VtablePtr::Kind:
  1695. break;
  1696. case CARBON_KIND(SemIR::SymbolicBindingPattern bind): {
  1697. // TODO: Disable constant evaluation of SymbolicBindingPattern once
  1698. // DeduceGenericCallArguments no longer needs implicit params to have
  1699. // constant values.
  1700. const auto& bind_name =
  1701. eval_context.entity_names().Get(bind.entity_name_id);
  1702. // If we know which specific we're evaluating within and this is an
  1703. // argument of that specific, its constant value is the corresponding
  1704. // argument value.
  1705. if (auto value =
  1706. eval_context.GetCompileTimeBindValue(bind_name.bind_index);
  1707. value.has_value()) {
  1708. return value;
  1709. }
  1710. // The constant form of a symbolic binding is an idealized form of the
  1711. // original, with no equivalent value.
  1712. bind.entity_name_id =
  1713. eval_context.entity_names().MakeCanonical(bind.entity_name_id);
  1714. return MakeConstantResult(eval_context.context(), bind, Phase::Symbolic);
  1715. }
  1716. case CARBON_KIND(SemIR::BindSymbolicName bind): {
  1717. const auto& bind_name =
  1718. eval_context.entity_names().Get(bind.entity_name_id);
  1719. Phase phase;
  1720. if (bind_name.name_id == SemIR::NameId::PeriodSelf) {
  1721. phase = Phase::PeriodSelfSymbolic;
  1722. } else {
  1723. // If we know which specific we're evaluating within and this is an
  1724. // argument of that specific, its constant value is the corresponding
  1725. // argument value.
  1726. if (auto value =
  1727. eval_context.GetCompileTimeBindValue(bind_name.bind_index);
  1728. value.has_value()) {
  1729. return value;
  1730. }
  1731. phase = Phase::Symbolic;
  1732. }
  1733. // The constant form of a symbolic binding is an idealized form of the
  1734. // original, with no equivalent value.
  1735. bind.entity_name_id =
  1736. eval_context.entity_names().MakeCanonical(bind.entity_name_id);
  1737. bind.value_id = SemIR::InstId::None;
  1738. if (!ReplaceFieldWithConstantValue(
  1739. eval_context, &bind, &SemIR::BindSymbolicName::type_id, &phase)) {
  1740. return MakeNonConstantResult(phase);
  1741. }
  1742. return MakeConstantResult(eval_context.context(), bind, phase);
  1743. }
  1744. // AsCompatible changes the type of the source instruction; its constant
  1745. // value, if there is one, needs to be modified to be of the same type.
  1746. case CARBON_KIND(SemIR::AsCompatible inst): {
  1747. auto value = eval_context.GetConstantValue(inst.source_id);
  1748. if (!value.is_constant()) {
  1749. return value;
  1750. }
  1751. auto from_phase = Phase::Template;
  1752. auto value_inst_id =
  1753. GetConstantValue(eval_context, inst.source_id, &from_phase);
  1754. auto to_phase = Phase::Template;
  1755. auto type_id = GetConstantValue(eval_context, inst.type_id, &to_phase);
  1756. auto value_inst = eval_context.insts().Get(value_inst_id);
  1757. value_inst.SetType(type_id);
  1758. if (to_phase >= from_phase) {
  1759. // If moving from a template constant value to a symbolic type, the new
  1760. // constant value takes on the phase of the new type. We're adding the
  1761. // symbolic bit to the new constant value due to the presence of a
  1762. // symbolic type.
  1763. return MakeConstantResult(eval_context.context(), value_inst, to_phase);
  1764. } else {
  1765. // If moving from a symbolic constant value to a template type, the new
  1766. // constant value has a phase that depends on what is in the value. If
  1767. // there is anything symbolic within the value, then it's symbolic. We
  1768. // can't easily determine that here without evaluating a new constant
  1769. // value. See
  1770. // https://github.com/carbon-language/carbon-lang/pull/4881#discussion_r1939961372
  1771. [[clang::musttail]] return TryEvalInstInContext(
  1772. eval_context, SemIR::InstId::None, value_inst);
  1773. }
  1774. }
  1775. // These semantic wrappers don't change the constant value.
  1776. case CARBON_KIND(SemIR::BindAlias typed_inst): {
  1777. return eval_context.GetConstantValue(typed_inst.value_id);
  1778. }
  1779. case CARBON_KIND(SemIR::ExportDecl typed_inst): {
  1780. return eval_context.GetConstantValue(typed_inst.value_id);
  1781. }
  1782. case CARBON_KIND(SemIR::NameRef typed_inst): {
  1783. return eval_context.GetConstantValue(typed_inst.value_id);
  1784. }
  1785. case CARBON_KIND(SemIR::ValueParamPattern param_pattern): {
  1786. // TODO: Treat this as a non-expression (here and in GetExprCategory)
  1787. // once generic deduction doesn't need patterns to have constant values.
  1788. return eval_context.GetConstantValue(param_pattern.subpattern_id);
  1789. }
  1790. case CARBON_KIND(SemIR::Converted typed_inst): {
  1791. return eval_context.GetConstantValue(typed_inst.result_id);
  1792. }
  1793. case CARBON_KIND(SemIR::InitializeFrom typed_inst): {
  1794. return eval_context.GetConstantValue(typed_inst.src_id);
  1795. }
  1796. case CARBON_KIND(SemIR::SpliceBlock typed_inst): {
  1797. return eval_context.GetConstantValue(typed_inst.result_id);
  1798. }
  1799. case CARBON_KIND(SemIR::ValueOfInitializer typed_inst): {
  1800. return eval_context.GetConstantValue(typed_inst.init_id);
  1801. }
  1802. case CARBON_KIND(SemIR::FacetAccessType typed_inst): {
  1803. Phase phase = Phase::Template;
  1804. if (ReplaceFieldWithConstantValue(
  1805. eval_context, &typed_inst,
  1806. &SemIR::FacetAccessType::facet_value_inst_id, &phase)) {
  1807. if (auto facet_value = eval_context.insts().TryGetAs<SemIR::FacetValue>(
  1808. typed_inst.facet_value_inst_id)) {
  1809. return eval_context.constant_values().Get(facet_value->type_inst_id);
  1810. }
  1811. return MakeConstantResult(eval_context.context(), typed_inst, phase);
  1812. } else {
  1813. return MakeNonConstantResult(phase);
  1814. }
  1815. }
  1816. case CARBON_KIND(SemIR::FacetAccessWitness typed_inst): {
  1817. Phase phase = Phase::Template;
  1818. if (ReplaceFieldWithConstantValue(
  1819. eval_context, &typed_inst,
  1820. &SemIR::FacetAccessWitness::facet_value_inst_id, &phase)) {
  1821. if (auto facet_value = eval_context.insts().TryGetAs<SemIR::FacetValue>(
  1822. typed_inst.facet_value_inst_id)) {
  1823. return eval_context.constant_values().Get(
  1824. facet_value->witness_inst_id);
  1825. }
  1826. return MakeConstantResult(eval_context.context(), typed_inst, phase);
  1827. } else {
  1828. return MakeNonConstantResult(phase);
  1829. }
  1830. }
  1831. case CARBON_KIND(SemIR::WhereExpr typed_inst): {
  1832. Phase phase = Phase::Template;
  1833. SemIR::TypeId base_facet_type_id =
  1834. eval_context.insts().Get(typed_inst.period_self_id).type_id();
  1835. SemIR::Inst base_facet_inst =
  1836. eval_context.GetConstantValueAsInst(base_facet_type_id);
  1837. SemIR::FacetTypeInfo info = {.other_requirements = false};
  1838. // `where` provides that the base facet is an error, `type`, or a facet
  1839. // type.
  1840. if (auto facet_type = base_facet_inst.TryAs<SemIR::FacetType>()) {
  1841. info = GetConstantFacetTypeInfo(eval_context, facet_type->facet_type_id,
  1842. &phase);
  1843. } else if (base_facet_type_id == SemIR::ErrorInst::SingletonTypeId) {
  1844. return SemIR::ErrorInst::SingletonConstantId;
  1845. } else {
  1846. CARBON_CHECK(base_facet_type_id == SemIR::TypeType::SingletonTypeId,
  1847. "Unexpected type_id: {0}, inst: {1}", base_facet_type_id,
  1848. base_facet_inst);
  1849. }
  1850. if (typed_inst.requirements_id.has_value()) {
  1851. auto insts = eval_context.inst_blocks().Get(typed_inst.requirements_id);
  1852. for (auto inst_id : insts) {
  1853. if (auto rewrite =
  1854. eval_context.insts().TryGetAs<SemIR::RequirementRewrite>(
  1855. inst_id)) {
  1856. SemIR::ConstantId lhs =
  1857. eval_context.GetConstantValue(rewrite->lhs_id);
  1858. SemIR::ConstantId rhs =
  1859. eval_context.GetConstantValue(rewrite->rhs_id);
  1860. // `where` requirements using `.Self` should not be considered
  1861. // symbolic
  1862. UpdatePhaseIgnorePeriodSelf(eval_context, lhs, &phase);
  1863. UpdatePhaseIgnorePeriodSelf(eval_context, rhs, &phase);
  1864. info.rewrite_constraints.push_back(
  1865. {.lhs_const_id = lhs, .rhs_const_id = rhs});
  1866. } else {
  1867. // TODO: Handle other requirements
  1868. info.other_requirements = true;
  1869. }
  1870. }
  1871. }
  1872. info.Canonicalize();
  1873. return MakeFacetTypeResult(eval_context.context(), info, phase);
  1874. }
  1875. // `not true` -> `false`, `not false` -> `true`.
  1876. // All other uses of unary `not` are non-constant.
  1877. case CARBON_KIND(SemIR::UnaryOperatorNot typed_inst): {
  1878. auto const_id = eval_context.GetConstantValue(typed_inst.operand_id);
  1879. auto phase = GetPhase(eval_context, const_id);
  1880. if (phase == Phase::Template) {
  1881. auto value = eval_context.insts().GetAs<SemIR::BoolLiteral>(
  1882. eval_context.constant_values().GetInstId(const_id));
  1883. return MakeBoolResult(eval_context.context(), value.type_id,
  1884. !value.value.ToBool());
  1885. }
  1886. if (phase == Phase::UnknownDueToError) {
  1887. return SemIR::ErrorInst::SingletonConstantId;
  1888. }
  1889. break;
  1890. }
  1891. // `const (const T)` evaluates to `const T`. Otherwise, `const T` evaluates
  1892. // to itself.
  1893. case CARBON_KIND(SemIR::ConstType typed_inst): {
  1894. auto phase = Phase::Template;
  1895. auto inner_id =
  1896. GetConstantValue(eval_context, typed_inst.inner_id, &phase);
  1897. if (eval_context.context().types().Is<SemIR::ConstType>(inner_id)) {
  1898. return eval_context.context().types().GetConstantId(inner_id);
  1899. }
  1900. typed_inst.inner_id = inner_id;
  1901. return MakeConstantResult(eval_context.context(), typed_inst, phase);
  1902. }
  1903. case CARBON_KIND(SemIR::RequireCompleteType require_complete): {
  1904. auto phase = Phase::Template;
  1905. auto witness_type_id = eval_context.context().GetSingletonType(
  1906. SemIR::WitnessType::SingletonInstId);
  1907. auto complete_type_id = GetConstantValue(
  1908. eval_context, require_complete.complete_type_id, &phase);
  1909. // If the type is a template constant, require it to be complete now.
  1910. if (phase == Phase::Template) {
  1911. if (!eval_context.context().TryToCompleteType(
  1912. complete_type_id, eval_context.GetDiagnosticLoc(inst_id), [&] {
  1913. CARBON_DIAGNOSTIC(IncompleteTypeInMonomorphization, Error,
  1914. "{0} evaluates to incomplete type {1}",
  1915. SemIR::TypeId, SemIR::TypeId);
  1916. return eval_context.emitter().Build(
  1917. eval_context.GetDiagnosticLoc(inst_id),
  1918. IncompleteTypeInMonomorphization,
  1919. require_complete.complete_type_id, complete_type_id);
  1920. })) {
  1921. return SemIR::ErrorInst::SingletonConstantId;
  1922. }
  1923. return MakeConstantResult(
  1924. eval_context.context(),
  1925. SemIR::CompleteTypeWitness{
  1926. .type_id = witness_type_id,
  1927. .object_repr_id =
  1928. eval_context.types().GetObjectRepr(complete_type_id)},
  1929. phase);
  1930. }
  1931. // If it's not a template constant, require it to be complete once it
  1932. // becomes one.
  1933. return MakeConstantResult(
  1934. eval_context.context(),
  1935. SemIR::RequireCompleteType{.type_id = witness_type_id,
  1936. .complete_type_id = complete_type_id},
  1937. phase);
  1938. }
  1939. // These cases are either not expressions or not constant.
  1940. case SemIR::AddrPattern::Kind:
  1941. case SemIR::Assign::Kind:
  1942. case SemIR::BindName::Kind:
  1943. case SemIR::BindingPattern::Kind:
  1944. case SemIR::BlockArg::Kind:
  1945. case SemIR::Branch::Kind:
  1946. case SemIR::BranchIf::Kind:
  1947. case SemIR::BranchWithArg::Kind:
  1948. case SemIR::ImportDecl::Kind:
  1949. case SemIR::NameBindingDecl::Kind:
  1950. case SemIR::OutParam::Kind:
  1951. case SemIR::OutParamPattern::Kind:
  1952. case SemIR::RequirementEquivalent::Kind:
  1953. case SemIR::RequirementImpls::Kind:
  1954. case SemIR::RequirementRewrite::Kind:
  1955. case SemIR::Return::Kind:
  1956. case SemIR::ReturnExpr::Kind:
  1957. case SemIR::ReturnSlotPattern::Kind:
  1958. case SemIR::StructLiteral::Kind:
  1959. case SemIR::TupleLiteral::Kind:
  1960. case SemIR::ValueParam::Kind:
  1961. case SemIR::VarPattern::Kind:
  1962. case SemIR::VarStorage::Kind:
  1963. break;
  1964. case SemIR::ImportRefUnloaded::Kind:
  1965. CARBON_FATAL("ImportRefUnloaded should be loaded before TryEvalInst: {0}",
  1966. inst);
  1967. }
  1968. return SemIR::ConstantId::NotConstant;
  1969. }
  1970. auto TryEvalInst(Context& context, SemIR::InstId inst_id, SemIR::Inst inst)
  1971. -> SemIR::ConstantId {
  1972. EvalContext eval_context(context, inst_id);
  1973. return TryEvalInstInContext(eval_context, inst_id, inst);
  1974. }
  1975. auto TryEvalBlockForSpecific(Context& context, SemIRLoc loc,
  1976. SemIR::SpecificId specific_id,
  1977. SemIR::GenericInstIndex::Region region)
  1978. -> SemIR::InstBlockId {
  1979. auto generic_id = context.specifics().Get(specific_id).generic_id;
  1980. auto eval_block_id = context.generics().Get(generic_id).GetEvalBlock(region);
  1981. auto eval_block = context.inst_blocks().Get(eval_block_id);
  1982. llvm::SmallVector<SemIR::InstId> result;
  1983. result.resize(eval_block.size(), SemIR::InstId::None);
  1984. EvalContext eval_context(context, loc, specific_id,
  1985. SpecificEvalInfo{
  1986. .region = region,
  1987. .values = result,
  1988. });
  1989. DiagnosticAnnotationScope annotate_diagnostics(
  1990. &context.emitter(), [&](auto& builder) {
  1991. CARBON_DIAGNOSTIC(ResolvingSpecificHere, Note, "in {0} used here",
  1992. InstIdAsType);
  1993. builder.Note(loc, ResolvingSpecificHere,
  1994. GetInstForSpecific(context, specific_id));
  1995. });
  1996. for (auto [i, inst_id] : llvm::enumerate(eval_block)) {
  1997. auto const_id = TryEvalInstInContext(eval_context, inst_id,
  1998. context.insts().Get(inst_id));
  1999. result[i] = context.constant_values().GetInstId(const_id);
  2000. CARBON_CHECK(result[i].has_value());
  2001. }
  2002. return context.inst_blocks().Add(result);
  2003. }
  2004. } // namespace Carbon::Check