Skip to content

Commit

Permalink
Preprocessor: add support for UCN identifiers
Browse files — browse the repository at this point in the history
Closes Vexu#823
  • Loading branch information
ehaas committed Jan 22, 2025
1 parent 21ef6e5 commit 3b12e78
Show file tree
Hide file tree
Showing 4 changed files with 146 additions and 4 deletions.
34 changes: 31 additions & 3 deletions src/aro/Preprocessor.zig
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ const RawToken = Tokenizer.Token;
const Tree = @import("Tree.zig");
const Token = Tree.Token;
const TokenWithExpansionLocs = Tree.TokenWithExpansionLocs;
const ucn = @import("ucn.zig");

const DefineMap = std.StringHashMapUnmanaged(Macro);
const RawTokenList = std.ArrayList(RawToken);
Expand Down Expand Up @@ -991,7 +992,7 @@ fn expr(pp: *Preprocessor, tokenizer: *Tokenizer) MacroError!bool {
}
},
}
pp.addTokenAssumeCapacity(tok);
pp.addTokenAssumeCapacity(try pp.unescapeUcn(tok));
}
try pp.addToken(.{
.id = .eof,
Expand Down Expand Up @@ -2398,6 +2399,32 @@ fn expandMacroExhaustive(
buf.items.len = moving_end_idx;
}

/// Encode `codepoint` as UTF-8 and append it to the generated buffer.
/// Caller must have reserved enough capacity in `generated_buf` beforehand
/// (a decoded UCN is never longer than its escape-sequence spelling).
/// Returns an error instead of panicking when `codepoint` is not a valid
/// Unicode scalar value (a surrogate or a value above U+10FFFF).
fn writeUnescapedChar(pp: *Preprocessor, codepoint: u32) !void {
    // UTF-8 needs at most 4 bytes per scalar value.
    var space: [4]u8 = undefined;
    // Reject values that do not fit in a Unicode scalar instead of tripping
    // the @intCast safety check; utf8Encode rejects surrogates itself.
    const scalar = std.math.cast(u21, codepoint) orelse return error.InvalidCodepoint;
    const len = try std.unicode.utf8Encode(scalar, space[0..]);
    pp.comp.generated_buf.appendSliceAssumeCapacity(space[0..len]);
}

/// If `tok` is an extended identifier whose spelling contains universal
/// character name escapes (e.g. `\u4F60`), rewrite it into the generated
/// buffer with every UCN replaced by its UTF-8 encoding and return a token
/// that points at the generated source. All other tokens — and extended
/// identifiers without a backslash — are returned unchanged.
fn unescapeUcn(pp: *Preprocessor, tok: TokenWithExpansionLocs) !TokenWithExpansionLocs {
    if (tok.id == .extended_identifier) {
        @branchHint(.cold);
        const name = pp.expandedSlice(tok);
        const has_escape = mem.indexOfScalar(u8, name, '\\') != null;
        if (has_escape) {
            @branchHint(.cold);
            const gen_start = pp.comp.generated_buf.items.len;
            // The unescaped spelling is never longer than the escaped one;
            // reserve room for it plus the trailing newline up front so the
            // append calls below cannot fail.
            try pp.comp.generated_buf.ensureUnusedCapacity(pp.gpa, name.len + 1);

            var chars: ucn.CharIterator = .{ .str = name };
            while (chars.next()) |codepoint| try pp.writeUnescapedChar(codepoint);
            pp.comp.generated_buf.appendAssumeCapacity('\n');
            return pp.makeGeneratedToken(gen_start, .extended_identifier, tok);
        }
    }
    return tok;
}

/// Try to expand a macro after a possible candidate has been read from the `tokenizer`
/// into the `raw` token passed as argument
fn expandMacro(pp: *Preprocessor, tokenizer: *Tokenizer, raw: RawToken) MacroError!void {
Expand Down Expand Up @@ -2427,7 +2454,7 @@ fn expandMacro(pp: *Preprocessor, tokenizer: *Tokenizer, raw: RawToken) MacroErr
continue;
}
tok.id.simplifyMacroKeywordExtra(true);
pp.addTokenAssumeCapacity(tok.*);
pp.addTokenAssumeCapacity(try pp.unescapeUcn(tok.*));
}
if (pp.preserve_whitespace) {
try pp.ensureUnusedTokenCapacity(pp.add_expansion_nl);
Expand Down Expand Up @@ -3100,7 +3127,8 @@ fn makePragmaToken(pp: *Preprocessor, raw: RawToken, operator_loc: ?Source.Locat
return tok;
}

pub fn addToken(pp: *Preprocessor, tok: TokenWithExpansionLocs) !void {
pub fn addToken(pp: *Preprocessor, tok_arg: TokenWithExpansionLocs) !void {
const tok = try pp.unescapeUcn(tok_arg);
if (tok.expansion_locs) |expansion_locs| {
try pp.expansion_entries.append(pp.gpa, .{ .idx = @intCast(pp.tokens.len), .locs = expansion_locs });
}
Expand Down
36 changes: 35 additions & 1 deletion src/aro/Tokenizer.zig
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ pub const Token = struct {
eof,
/// identifier containing solely basic character set characters
identifier,
/// identifier with at least one extended character
/// identifier with at least one extended character or UCN escape sequence
extended_identifier,

// string literals with prefixes
Expand Down Expand Up @@ -1074,14 +1074,45 @@ pub fn next(self: *Tokenizer) Token {
pp_num,
pp_num_exponent,
pp_num_digit_separator,
ucn_slash,
ucn,
} = .start;

var start = self.index;
var id: Token.Id = .eof;
var ucn_wants: u8 = undefined;
var ucn_consumed: u8 = undefined;

while (self.index < self.buf.len) : (self.index += 1) {
const c = self.buf[self.index];
switch (state) {
.ucn_slash => switch (c) {
'u' => {
ucn_wants = 4;
ucn_consumed = 0;
state = .ucn;
},
'U' => {
ucn_wants = 8;
ucn_consumed = 0;
state = .ucn;
},
else => {
id = .invalid;
break;
},
},
.ucn => switch (c) {
'a'...'f', 'A'...'F', '0'...'9' => {
ucn_consumed += 1;
if (ucn_consumed == ucn_wants) {
state = .extended_identifier;
}
},
else => {
@panic("todo");
},
},
.start => switch (c) {
'\n' => {
id = .nl;
Expand All @@ -1100,6 +1131,7 @@ pub fn next(self: *Tokenizer) Token {
'u' => state = .u,
'U' => state = .U,
'L' => state = .L,
'\\' => state = .ucn_slash,
'a'...'t', 'v'...'z', 'A'...'K', 'M'...'T', 'V'...'Z', '_' => state = .identifier,
'=' => state = .equal,
'!' => state = .bang,
Expand Down Expand Up @@ -1325,6 +1357,7 @@ pub fn next(self: *Tokenizer) Token {
break;
},
0x80...0xFF => state = .extended_identifier,
'\\' => state = .ucn_slash,
else => {
id = if (state == .identifier) Token.getTokenId(self.langopts, self.buf[start..self.index]) else .extended_identifier;
break;
Expand Down Expand Up @@ -1732,6 +1765,7 @@ pub fn next(self: *Tokenizer) Token {
}
} else if (self.index == self.buf.len) {
switch (state) {
.ucn_slash, .ucn => @panic("todo"),
.start, .line_comment => {},
.u, .u8, .U, .L, .identifier => id = Token.getTokenId(self.langopts, self.buf[start..self.index]),
.extended_identifier => id = .extended_identifier,
Expand Down
63 changes: 63 additions & 0 deletions src/aro/ucn.zig
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
const std = @import("std");

/// Result of decoding a single universal character name.
const DecodedUniversalChar = struct {
    /// The decoded codepoint (not validated as a Unicode scalar value).
    codepoint: u32,
    /// Number of bytes of the input consumed by the escape sequence.
    consumed: usize,
};

/// Decodes a C99-style universal character name (e.g., \uXXXX or \UXXXXXXXX)
/// into a unicode codepoint. Returns the decoded character and the number of
/// bytes consumed from the input string, or null when the input is too short
/// or a required hex digit is missing.
fn decodeUniversalChar(input: []const u8) ?DecodedUniversalChar {
    // Need at least the leading `\` plus `u`/`U` to classify the escape.
    if (input.len < 2) return null;
    const is_long = input[1] == 'U';
    // `\u` + 4 hex digits, or `\U` + 8 hex digits.
    const required: usize = if (is_long) 10 else 6;

    if (input.len < required)
        return null;

    const hex_part = input[2..required];
    var codepoint: u32 = 0;
    for (hex_part) |c| {
        codepoint *= 16;
        const value = switch (c) {
            '0'...'9' => c - '0',
            'a'...'f' => 10 + (c - 'a'),
            'A'...'F' => 10 + (c - 'A'),
            else => return null,
        };
        codepoint += value;
    }

    return .{ .codepoint = codepoint, .consumed = required };
}

/// Iterates the codepoints of a string, decoding UCN escape sequences and
/// multi-byte UTF-8 sequences along the way. A `\` that does not start a
/// well-formed UCN is yielded as itself; invalid or truncated UTF-8 is
/// yielded byte-by-byte so iteration always terminates.
pub const CharIterator = struct {
    str: []const u8,
    i: usize = 0,

    pub fn next(self: *@This()) ?u32 {
        if (self.i >= self.str.len) return null;
        if (self.str[self.i] == '\\' and self.i + 1 < self.str.len and (self.str[self.i + 1] == 'u' or self.str[self.i + 1] == 'U')) {
            const decoded = decodeUniversalChar(self.str[self.i..]) orelse {
                // Malformed escape: emit the backslash and resynchronize on
                // the next byte.
                self.i += 1;
                return '\\';
            };
            self.i += decoded.consumed;
            return decoded.codepoint;
        } else {
            const len = std.unicode.utf8ByteSequenceLength(self.str[self.i]) catch 1;
            if (self.i + len > self.str.len) {
                // Truncated multi-byte sequence at end of input: emit the
                // lone lead byte instead of slicing out of bounds.
                defer self.i += 1;
                return self.str[self.i];
            }
            const cp = switch (len) {
                1 => self.str[self.i],
                2 => std.unicode.utf8Decode2(self.str[self.i..][0..2].*),
                3 => std.unicode.utf8Decode3(self.str[self.i..][0..3].*),
                4 => std.unicode.utf8Decode4(self.str[self.i..][0..4].*),
                else => unreachable,
            } catch {
                // Invalid UTF-8: emit the raw byte and advance by one.
                defer self.i += 1;
                return self.str[self.i];
            };
            self.i += len;
            return cp;
        }
    }
};
17 changes: 17 additions & 0 deletions test/cases/ucn identifiers.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
int foo(void) {
int \u4F60\u597D = 5; /* UCN spelling of the identifier 你好; must denote the same identifier as the literal UTF-8 spelling below */
int \u0061 = 5; // TODO: error: character 'a' cannot be specified by a universal character name
return 你好; /* refers to the UCN-declared variable above */
}

struct S {
int 你好; /* member declared with the literal UTF-8 spelling */
};

int bar(int x) {
struct S s;
s.\u4F60\u597D = x; /* UCN spelling accesses the member declared with UTF-8 bytes */
return s.你好;
}

#define TESTS_SKIPPED 1

0 comments on commit 3b12e78

Please sign in to comment.