// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "toolchain/lex/tokenized_buffer.h"

#include <algorithm>
#include <cmath>
#include <iterator>
#include <optional>
#include <utility>

#include "common/check.h"
#include "common/string_helpers.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
#include "toolchain/base/shared_value_stores.h"
#include "toolchain/diagnostics/diagnostic_emitter.h"
#include "toolchain/lex/character_set.h"
#include "toolchain/lex/numeric_literal.h"
#include "toolchain/lex/string_literal.h"

namespace Carbon::Lex {

auto TokenizedBuffer::GetLine(TokenIndex token) const -> LineIndex {
  return FindLineIndex(GetTokenInfo(token).byte_offset());
}

auto TokenizedBuffer::GetLineNumber(TokenIndex token) const -> int {
  return GetLine(token).index + 1;
}

auto TokenizedBuffer::GetColumnNumber(TokenIndex token) const -> int {
  const auto& token_info = GetTokenInfo(token);
  const auto& line_info = GetLineInfo(FindLineIndex(token_info.byte_offset()));
  return token_info.byte_offset() - line_info.start + 1;
}

auto TokenizedBuffer::GetEndLoc(TokenIndex token) const
    -> std::pair<LineIndex, int> {
  auto line = GetLine(token);
  int column = GetColumnNumber(token);
  auto token_text = GetTokenText(token);
  if (auto [before_newline, after_newline] = token_text.rsplit('\n');
      before_newline.size() == token_text.size()) {
    // Token fits on one line, advance the column number.
    column += before_newline.size();
  } else {
    // Token contains newlines.
    line.index += before_newline.count('\n') + 1;
    column = 1 + after_newline.size();
  }
  return {line, column};
}
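
// Illustrative note on the branch above: `rsplit('\n')` splits around the
// *last* newline, and when no newline exists it returns the whole string as
// the first piece, so `before_newline.size() == token_text.size()` detects a
// single-line token. For a hypothetical block-string token spelled
// "'''\nhi\n'''" starting at line 10: before_newline is "'''\nhi" (one
// embedded '\n'), after_newline is "'''", so the end line is 10 + 1 + 1 = 12
// and the end column is 1 + 3 = 4, one past the closing quote.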

auto TokenizedBuffer::GetTokenText(TokenIndex token) const -> llvm::StringRef {
  const auto& token_info = GetTokenInfo(token);
  llvm::StringRef fixed_spelling = token_info.kind().fixed_spelling();
  if (!fixed_spelling.empty()) {
    return fixed_spelling;
  }

  if (token_info.kind() == TokenKind::Error) {
    return source_->text().substr(token_info.byte_offset(),
                                  token_info.error_length());
  }

  // Refer back to the source text to preserve oddities like radix or digit
  // separators the author included.
  if (token_info.kind() == TokenKind::IntLiteral ||
      token_info.kind() == TokenKind::RealLiteral) {
    std::optional<NumericLiteral> relexed_token =
        NumericLiteral::Lex(source_->text().substr(token_info.byte_offset()),
                            token_info.kind() == TokenKind::RealLiteral);
    CARBON_CHECK(relexed_token, "Could not reform numeric literal token.");
    return relexed_token->text();
  }

  // Refer back to the source text to find the original spelling, including
  // escape sequences etc.
  if (token_info.kind() == TokenKind::StringLiteral) {
    std::optional<StringLiteral> relexed_token =
        StringLiteral::Lex(source_->text().substr(token_info.byte_offset()));
    CARBON_CHECK(relexed_token, "Could not reform string literal token.");
    return relexed_token->text();
  }

  // Refer back to the source text to avoid needing to reconstruct the
  // spelling from the size.
  if (token_info.kind().is_sized_type_literal()) {
    llvm::StringRef suffix = source_->text()
                                 .substr(token_info.byte_offset() + 1)
                                 .take_while(IsDecimalDigit);
    // Widen the digit suffix by one character to re-attach the leading letter
    // that was skipped above (e.g. recovering "i32" from "32").
    return llvm::StringRef(suffix.data() - 1, suffix.size() + 1);
  }

  if (token_info.kind() == TokenKind::FileStart ||
      token_info.kind() == TokenKind::FileEnd) {
    return llvm::StringRef();
  }

  CARBON_CHECK(token_info.kind() == TokenKind::Identifier, "{0}",
               token_info.kind());
  return value_stores_->identifiers().Get(token_info.ident_id());
}

auto TokenizedBuffer::GetIdentifier(TokenIndex token) const -> IdentifierId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind() == TokenKind::Identifier, "{0}",
               token_info.kind());
  return token_info.ident_id();
}

auto TokenizedBuffer::GetIntLiteral(TokenIndex token) const -> IntId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind() == TokenKind::IntLiteral, "{0}",
               token_info.kind());
  return token_info.int_id();
}

auto TokenizedBuffer::GetRealLiteral(TokenIndex token) const -> RealId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind() == TokenKind::RealLiteral, "{0}",
               token_info.kind());
  return token_info.real_id();
}

auto TokenizedBuffer::GetStringLiteralValue(TokenIndex token) const
    -> StringLiteralValueId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind() == TokenKind::StringLiteral, "{0}",
               token_info.kind());
  return token_info.string_literal_id();
}

auto TokenizedBuffer::GetTypeLiteralSize(TokenIndex token) const -> IntId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind().is_sized_type_literal(), "{0}",
               token_info.kind());
  return token_info.int_id();
}

auto TokenizedBuffer::GetMatchedClosingToken(TokenIndex opening_token) const
    -> TokenIndex {
  const auto& opening_token_info = GetTokenInfo(opening_token);
  CARBON_CHECK(opening_token_info.kind().is_opening_symbol(), "{0}",
               opening_token_info.kind());
  return opening_token_info.closing_token_index();
}

auto TokenizedBuffer::GetMatchedOpeningToken(TokenIndex closing_token) const
    -> TokenIndex {
  const auto& closing_token_info = GetTokenInfo(closing_token);
  CARBON_CHECK(closing_token_info.kind().is_closing_symbol(), "{0}",
               closing_token_info.kind());
  return closing_token_info.opening_token_index();
}

auto TokenizedBuffer::IsRecoveryToken(TokenIndex token) const -> bool {
  if (recovery_tokens_.empty()) {
    return false;
  }
  return recovery_tokens_[token.index];
}

auto TokenizedBuffer::GetNextLine(LineIndex line) const -> LineIndex {
  LineIndex next(line.index + 1);
  CARBON_DCHECK(static_cast<size_t>(next.index) < line_infos_.size());
  return next;
}

auto TokenizedBuffer::GetPrevLine(LineIndex line) const -> LineIndex {
  CARBON_CHECK(line.index > 0);
  return LineIndex(line.index - 1);
}

auto TokenizedBuffer::GetIndentColumnNumber(LineIndex line) const -> int {
  return GetLineInfo(line).indent + 1;
}

auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
  index = std::max(widths.index, index);
  kind = std::max(widths.kind, kind);
  column = std::max(widths.column, column);
  line = std::max(widths.line, line);
  indent = std::max(widths.indent, indent);
}

// Compute the printed width of a number. When numbers are printed in decimal,
// the number of digits needed is one more than the log-base-10 of the
// value. We handle a value of zero explicitly.
//
// This routine requires its argument to be *non-negative*.
static auto ComputeDecimalPrintedWidth(int number) -> int {
  CARBON_CHECK(number >= 0, "Negative numbers are not supported.");
  if (number == 0) {
    return 1;
  }
  return static_cast<int>(std::log10(number)) + 1;
}
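
// Worked examples: ComputeDecimalPrintedWidth(0) == 1, since "0" still prints
// one digit even though log10 is undefined at zero;
// ComputeDecimalPrintedWidth(999) == floor(log10(999)) + 1 == 2 + 1 == 3; and
// ComputeDecimalPrintedWidth(1000) == 3 + 1 == 4.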

auto TokenizedBuffer::GetTokenPrintWidths(TokenIndex token) const
    -> PrintWidths {
  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  widths.kind = GetKind(token).name().size();
  widths.line = ComputeDecimalPrintedWidth(GetLineNumber(token));
  widths.column = ComputeDecimalPrintedWidth(GetColumnNumber(token));
  widths.indent =
      ComputeDecimalPrintedWidth(GetIndentColumnNumber(GetLine(token)));
  return widths;
}

auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream,
                            bool omit_file_boundary_tokens) const -> void {
  output_stream << "- filename: " << source_->filename() << "\n"
                << "  tokens:\n";

  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  for (TokenIndex token : tokens()) {
    widths.Widen(GetTokenPrintWidths(token));
  }

  for (TokenIndex token : tokens()) {
    if (omit_file_boundary_tokens) {
      auto kind = GetKind(token);
      if (kind == TokenKind::FileStart || kind == TokenKind::FileEnd) {
        continue;
      }
    }
    PrintToken(output_stream, token, widths);
    output_stream << "\n";
  }
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
                                 TokenIndex token) const -> void {
  PrintToken(output_stream, token, {});
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
                                 TokenIndex token, PrintWidths widths) const
    -> void {
  widths.Widen(GetTokenPrintWidths(token));
  int token_index = token.index;
  const auto& token_info = GetTokenInfo(token);
  LineIndex line_index = FindLineIndex(token_info.byte_offset());
  llvm::StringRef token_text = GetTokenText(token);

  // Output the main chunk using one format string. We have to do the
  // justification manually in order to use the dynamically computed widths
  // and get the quotes included.
  output_stream << llvm::formatv(
      "  - { index: {0}, kind: {1}, line: {2}, column: {3}, indent: {4}, "
      "spelling: \"{5}\"",
      llvm::format_decimal(token_index, widths.index),
      llvm::right_justify(
          llvm::formatv("\"{0}\"", token_info.kind().name()).str(),
          widths.kind + 2),
      llvm::format_decimal(GetLineNumber(token), widths.line),
      llvm::format_decimal(GetColumnNumber(token), widths.column),
      llvm::format_decimal(GetIndentColumnNumber(line_index), widths.indent),
      FormatEscaped(token_text, /*use_hex_escapes=*/true));

  switch (token_info.kind()) {
    case TokenKind::Identifier:
      output_stream << ", identifier: " << GetIdentifier(token).index;
      break;
    case TokenKind::IntLiteral:
      output_stream << ", value: \"";
      value_stores_->ints()
          .Get(GetIntLiteral(token))
          .print(output_stream, /*isSigned=*/false);
      output_stream << "\"";
      break;
    case TokenKind::RealLiteral:
      output_stream << ", value: \""
                    << value_stores_->reals().Get(GetRealLiteral(token))
                    << "\"";
      break;
    case TokenKind::StringLiteral:
      output_stream << ", value: \""
                    << FormatEscaped(value_stores_->string_literal_values().Get(
                                         GetStringLiteralValue(token)),
                                     /*use_hex_escapes=*/true)
                    << "\"";
      break;
    default:
      if (token_info.kind().is_opening_symbol()) {
        output_stream << ", closing_token: "
                      << GetMatchedClosingToken(token).index;
      } else if (token_info.kind().is_closing_symbol()) {
        output_stream << ", opening_token: "
                      << GetMatchedOpeningToken(token).index;
      }
      break;
  }

  if (token_info.has_leading_space()) {
    output_stream << ", has_leading_space: true";
  }
  if (IsRecoveryToken(token)) {
    output_stream << ", recovery: true";
  }
  output_stream << " }";
}
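
// Illustrative dump shape for a hypothetical source `fn F` (field widths
// compacted here; real output pads them so columns align across tokens):
//
//   - { index: 1, kind: "Fn", line: 1, column: 1, indent: 1, spelling: "fn" }
//   - { index: 2, kind: "Identifier", line: 1, column: 4, indent: 1, spelling: "F", identifier: 0, has_leading_space: true }
//
// Kind-specific fields (identifier, value, closing_token, ...) are appended
// after the common prefix, so each token stays a single-line YAML mapping.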

// Find the line index corresponding to a specific byte offset within the
// source text for this tokenized buffer.
//
// This takes advantage of the lines being sorted by their starting byte
// offsets to do a binary search for the line that contains the provided
// offset.
auto TokenizedBuffer::FindLineIndex(int32_t byte_offset) const -> LineIndex {
  CARBON_DCHECK(!line_infos_.empty());
  const auto* line_it =
      llvm::partition_point(line_infos_, [byte_offset](LineInfo line_info) {
        return line_info.start <= byte_offset;
      });
  --line_it;

  // If this isn't the first line but it starts past the end of the source,
  // then this is a synthetic line added for simplicity of lexing. Step back
  // one further to find the last non-synthetic line.
  if (line_it != line_infos_.begin() &&
      line_it->start == static_cast<int32_t>(source_->text().size())) {
    --line_it;
  }
  CARBON_DCHECK(line_it->start <= byte_offset);
  return LineIndex(line_it - line_infos_.begin());
}
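
// Example of the search, assuming hypothetical line starts {0, 10, 25}: for
// byte_offset 12, llvm::partition_point returns the iterator to the line
// starting at 25 (the first line whose start exceeds the offset), and
// stepping back one lands on the line starting at 10, which contains the
// offset.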

auto TokenizedBuffer::GetLineInfo(LineIndex line) -> LineInfo& {
  return line_infos_[line.index];
}

auto TokenizedBuffer::GetLineInfo(LineIndex line) const -> const LineInfo& {
  return line_infos_[line.index];
}

auto TokenizedBuffer::AddLine(LineInfo info) -> LineIndex {
  line_infos_.push_back(info);
  return LineIndex(static_cast<int>(line_infos_.size()) - 1);
}

auto TokenizedBuffer::IsAfterComment(TokenIndex token,
                                     CommentIndex comment_index) const -> bool {
  const auto& comment_data = comments_[comment_index.index];
  return GetTokenInfo(token).byte_offset() > comment_data.start;
}

auto TokenizedBuffer::GetCommentText(CommentIndex comment_index) const
    -> llvm::StringRef {
  const auto& comment_data = comments_[comment_index.index];
  return source_->text().substr(comment_data.start, comment_data.length);
}

auto TokenizedBuffer::AddComment(int32_t indent, int32_t start, int32_t end)
    -> void {
  if (!comments_.empty()) {
    auto& comment = comments_.back();
    if (comment.start + comment.length + indent == start) {
      comment.length = end - comment.start;
      return;
    }
  }
  comments_.push_back({.start = start, .length = end - start});
}
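
// Example of the merge condition, with hypothetical offsets: if the previous
// comment covers bytes [10, 20) and the next comment line has 2 bytes of
// indentation, its text begins at byte 22 and 10 + 10 + 2 == 22 holds, so the
// two lines are coalesced into one comment covering [10, end). The merge can
// only fire when the previous comment's recorded end coincides with the start
// of the next line's indentation; a blank line in between breaks the chain
// and starts a new comment.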

auto TokenizedBuffer::CollectMemUsage(MemUsage& mem_usage,
                                      llvm::StringRef label) const -> void {
  mem_usage.Collect(MemUsage::ConcatLabel(label, "allocator_"), allocator_);
  mem_usage.Collect(MemUsage::ConcatLabel(label, "token_infos_"), token_infos_);
  mem_usage.Collect(MemUsage::ConcatLabel(label, "line_infos_"), line_infos_);
  mem_usage.Collect(MemUsage::ConcatLabel(label, "comments_"), comments_);
}

auto TokenizedBuffer::SourcePointerToDiagnosticLoc(const char* loc) const
    -> Diagnostics::ConvertedLoc {
  CARBON_CHECK(StringRefContainsPointer(source_->text(), loc),
               "location not within buffer");
  int32_t offset = loc - source_->text().begin();

  // Find the first line starting after the given location.
  const auto* next_line_it = llvm::partition_point(
      line_infos_,
      [offset](const LineInfo& line) { return line.start <= offset; });

  // Step back one line to find the line containing the given position.
  CARBON_CHECK(next_line_it != line_infos_.begin(),
               "location precedes the start of the first line");
  const auto* line_it = std::prev(next_line_it);
  int line_number = line_it - line_infos_.begin();
  int column_number = offset - line_it->start;

  // Grab the line from the buffer by slicing from the start of this line to
  // the start of the next; when on the last line, instead slice to the end of
  // the buffer. The trailing newline is stripped below.
  llvm::StringRef text = source_->text();
  llvm::StringRef line = next_line_it != line_infos_.end()
                             ? text.slice(line_it->start, next_line_it->start)
                             : text.substr(line_it->start);

  // Remove a newline at the end of the line if present.
  // TODO: This should expand to remove all vertical whitespace bytes at the
  // tail of the line, such as CR+LF.
  line.consume_back("\n");

  return {.loc = {.filename = source_->filename(),
                  .line = line,
                  .line_number = line_number + 1,
                  .column_number = column_number + 1},
          .last_byte_offset = offset};
}
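
// Worked example with hypothetical source "abc\ndef\n" and line starts
// {0, 4, 8}: a pointer to 'e' has offset 5; the partition point is the line
// starting at 8, and stepping back gives the line starting at 4, so the
// reported location is line 2, column 2, with .line == "def" after the
// trailing newline is consumed.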

auto TokenizedBuffer::TokenToDiagnosticLoc(TokenIndex token) const
    -> Diagnostics::ConvertedLoc {
  // Map the token location into a position within the source buffer.
  const char* token_start =
      source_->text().begin() + GetTokenInfo(token).byte_offset();

  // Find the corresponding file location.
  // TODO: Should we somehow indicate in the diagnostic location if this token
  // is a recovery token that doesn't correspond to the original source?
  auto converted = SourcePointerToDiagnosticLoc(token_start);
  converted.loc.length = GetTokenText(token).size();
  return converted;
}

}  // namespace Carbon::Lex