When the buffer was separated from the tokenizer, we lost some validation, including really aggressive carriage return detection. This change brings that back in full force and adds some additional validation on top of it.
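For illustration, a minimal sketch of what buffer-level carriage return rejection can look like in Zig. The function and error names here are hypothetical, not the library's actual API, and the real validation may cover more than '\r':

const std = @import("std");

// Hypothetical sketch: reject any buffer that contains a carriage return.
// The real buffer-layer validation may be stricter than this.
fn validateNoCarriageReturn(buffer: []const u8) error{IllegalCarriageReturn}!void {
    if (std.mem.indexOfScalar(u8, buffer, '\r') != null)
        return error.IllegalCarriageReturn;
}

test "carriage returns are rejected" {
    try validateNoCarriageReturn("plain\nunix\nline endings\n");
    try std.testing.expectError(
        error.IllegalCarriageReturn,
        validateNoCarriageReturn("dos\r\nline endings\r\n"),
    );
}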
28 lines · 768 B · Zig
const std = @import("std");

const nice = @import("nice");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    // The file to parse is expected as the first command line argument.
    const args = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, args);
    if (args.len < 2) return;

    // Read the whole file into memory (the limit is the u32 maximum, ~4 GiB).
    const data = try std.fs.cwd().readFileAlloc(allocator, args[1], 4_294_967_295);
    var needfree = true;
    defer if (needfree) allocator.free(data);

    const document = try nice.parseBuffer(allocator, data, .{});
    defer document.deinit();

    // free data memory to ensure that the parsed document is not holding
    // references to it.
    allocator.free(data);
    needfree = false;

    document.printDebug();
}
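A note on the design choice here: freeing `data` before calling printDebug is deliberate, as the inline comment says. If the parsed document still referenced the input buffer, printDebug would then be reading freed memory, which testing (and the GeneralPurposeAllocator's debug-mode safety checks) is likely to catch, rather than the bug silently working because the buffer happened to still be alive. The `needfree` flag exists only so the deferred cleanup does not free `data` a second time.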