author     Yorhel <git@yorhel.nl>  2021-05-29 10:51:17 +0200
committer  Yorhel <git@yorhel.nl>  2021-05-29 10:54:45 +0200
commit     c077c5bed5378b298501da8054824fdef7c3a8ed (patch)
tree       b814ff0ebf397fcfb4d853b8eb298d7c5356dd2c /src
parent     9474aa4329110c1c8e3b24724c442a95bd49c947 (diff)
Implement JSON file import
Performance is looking great, but the code is rather ugly and potentially buggy. It also doesn't handle hard links without an "nlink" field yet.

Error handling of the import code is different from what I've been doing until now. That's intentional: I'll change the error handling of the other pieces to call ui.die() directly rather than propagating error enums. That approach is less testable but conceptually simpler, which is perfectly fine for a tiny application like ncdu.
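
To make the intended style concrete, here is a minimal sketch (illustrative only, not code from this commit; die() stands in for ui.die() and openDumpOrDie() is a hypothetical helper) contrasting error-enum propagation with reporting and exiting at the failure site:

    const std = @import("std");

    // Stand-in for ncdu's ui.die(); the real one tears down the ncurses UI before printing.
    fn die(comptime fmt: []const u8, args: anytype) noreturn {
        std.debug.print(fmt, args);
        std.process.exit(1);
    }

    // The style used elsewhere so far: return an error for the caller to handle or propagate.
    fn openDumpPropagating(path: [:0]const u8) !std.fs.File {
        return std.fs.cwd().openFileZ(path, .{});
    }

    // The style the import code uses: report the problem and exit right where it occurs.
    fn openDumpOrDie(path: [:0]const u8) std.fs.File {
        return std.fs.cwd().openFileZ(path, .{}) catch |e|
            die("Error opening {s}: {s}\n", .{ path, @errorName(e) });
    }

    pub fn main() void {
        // Hypothetical usage: no `try`, no error set to thread through callers.
        const f = openDumpOrDie("dump.json");
        defer f.close();
    }

The trade-off is the one described above: there is no error value left for tests to assert on, but each failure site can print a precise message without threading error context upward.
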
Diffstat (limited to 'src')
-rw-r--r--  src/browser.zig    8
-rw-r--r--  src/main.zig      24
-rw-r--r--  src/model.zig      8
-rw-r--r--  src/scan.zig     549
-rw-r--r--  src/ui.zig         1
5 files changed, 500 insertions, 90 deletions
diff --git a/src/browser.zig b/src/browser.zig
index 469bc8c..042537e 100644
--- a/src/browser.zig
+++ b/src/browser.zig
@@ -324,11 +324,13 @@ pub fn draw() !void {
ui.addch('?');
ui.style(.hd);
ui.addstr(" for help");
- if (main.config.read_only) {
+ if (main.config.imported) {
+ ui.move(0, saturateSub(ui.cols, 10));
+ ui.addstr("[imported]");
+ } else if (main.config.read_only) {
ui.move(0, saturateSub(ui.cols, 10));
ui.addstr("[readonly]");
}
- // TODO: [imported] indicator
ui.style(.default);
ui.move(1,0);
@@ -387,7 +389,7 @@ fn sortToggle(col: main.config.SortCol, default_order: main.config.SortOrder) vo
sortDir();
}
-pub fn key(ch: i32) !void {
+pub fn keyInput(ch: i32) !void {
if (need_confirm_quit) {
switch (ch) {
'y', 'Y' => if (need_confirm_quit) ui.quit(),
diff --git a/src/main.zig b/src/main.zig
index 573b931..e0bdba6 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -37,6 +37,7 @@ pub const config = struct {
pub var sort_dirsfirst: bool = false;
pub var read_only: bool = false;
+ pub var imported: bool = false;
pub var can_shell: bool = true;
pub var confirm_quit: bool = false;
};
@@ -164,7 +165,6 @@ fn readExcludeFile(path: []const u8) !void {
// TODO: Better error reporting
pub fn main() !void {
// Grab thousands_sep from the current C locale.
- // (We can safely remove this when not linking against libc, it's a somewhat obscure feature)
_ = c.setlocale(c.LC_ALL, "");
if (c.localeconv()) |locale| {
if (locale.*.thousands_sep) |sep| {
@@ -176,8 +176,8 @@ pub fn main() !void {
var args = Args(std.process.ArgIteratorPosix).init(std.process.ArgIteratorPosix.init());
var scan_dir: ?[]const u8 = null;
- var import_file: ?[]const u8 = null;
- var export_file: ?[]const u8 = null;
+ var import_file: ?[:0]const u8 = null;
+ var export_file: ?[:0]const u8 = null;
var has_scan_ui = false;
_ = args.next(); // program name
while (args.next()) |opt| {
@@ -197,7 +197,9 @@ pub fn main() !void {
else if(opt.is("-0")) { has_scan_ui = true; config.scan_ui = .none; }
else if(opt.is("-1")) { has_scan_ui = true; config.scan_ui = .line; }
else if(opt.is("-2")) { has_scan_ui = true; config.scan_ui = .full; }
+ else if(opt.is("-o") and export_file != null) ui.die("The -o flag can only be given once.\n", .{})
else if(opt.is("-o")) export_file = args.arg()
+ else if(opt.is("-f") and import_file != null) ui.die("The -f flag can only be given once.\n", .{})
else if(opt.is("-f")) import_file = args.arg()
else if(opt.is("--si")) config.si = true
else if(opt.is("-L") or opt.is("--follow-symlinks")) config.follow_symlinks = true
@@ -220,24 +222,28 @@ pub fn main() !void {
ui.die("The --exclude-kernfs tag is currently only supported on Linux.\n", .{});
const out_tty = std.io.getStdOut().isTty();
+ const in_tty = std.io.getStdIn().isTty();
if (!has_scan_ui) {
if (export_file) |f| {
if (!out_tty or std.mem.eql(u8, f, "-")) config.scan_ui = .none
else config.scan_ui = .line;
}
}
- config.nc_tty = if (export_file) |f| std.mem.eql(u8, f, "-") else false;
+ if (!in_tty and import_file == null and export_file == null)
+ ui.die("Standard input is not a TTY. Did you mean to import a file using '-f -'?\n", .{});
+ config.nc_tty = !in_tty or (if (export_file) |f| std.mem.eql(u8, f, "-") else false);
event_delay_timer = try std.time.Timer.start();
defer ui.deinit();
+ state = .scan;
var out_file = if (export_file) |f| (
if (std.mem.eql(u8, f, "-")) std.io.getStdOut()
- else try std.fs.cwd().createFile(f, .{})
+ else try std.fs.cwd().createFileZ(f, .{})
) else null;
- state = .scan;
- try scan.scanRoot(scan_dir orelse ".", out_file);
+ try if (import_file) |f| scan.importRoot(f, out_file)
+ else scan.scanRoot(scan_dir orelse ".", out_file);
if (out_file != null) return;
config.scan_ui = .full; // in case we're refreshing from the UI, always in full mode.
@@ -274,8 +280,8 @@ pub fn handleEvent(block: bool, force_draw: bool) !void {
if (ch == 0) return;
if (ch == -1) return handleEvent(firstblock, true);
switch (state) {
- .scan => try scan.key(ch),
- .browse => try browser.key(ch),
+ .scan => try scan.keyInput(ch),
+ .browse => try browser.keyInput(ch),
}
firstblock = false;
}
diff --git a/src/model.zig b/src/model.zig
index 31474cf..760300e 100644
--- a/src/model.zig
+++ b/src/model.zig
@@ -196,10 +196,10 @@ pub const File = packed struct {
};
pub const Ext = packed struct {
- mtime: u64,
- uid: u32,
- gid: u32,
- mode: u16,
+ mtime: u64 = 0,
+ uid: u32 = 0,
+ gid: u32 = 0,
+ mode: u16 = 0,
};
comptime {
diff --git a/src/scan.zig b/src/scan.zig
index 0af71a7..b90b847 100644
--- a/src/scan.zig
+++ b/src/scan.zig
@@ -9,15 +9,16 @@ const c_fnmatch = @cImport(@cInclude("fnmatch.h"));
// Concise stat struct for fields we're interested in, with the types used by the model.
const Stat = struct {
- blocks: u61,
- size: u64,
- dev: u64,
- ino: u64,
- nlink: u32,
- dir: bool,
- reg: bool,
- symlink: bool,
- ext: model.Ext,
+ blocks: u61 = 0,
+ size: u64 = 0,
+ dev: u64 = 0,
+ ino: u64 = 0,
+ nlink: u32 = 0,
+ hlinkc: bool = false,
+ dir: bool = false,
+ reg: bool = true,
+ symlink: bool = false,
+ ext: model.Ext = .{},
fn clamp(comptime T: type, comptime field: anytype, x: anytype) std.meta.fieldInfo(T, field).field_type {
return castClamp(std.meta.fieldInfo(T, field).field_type, x);
@@ -35,6 +36,7 @@ const Stat = struct {
.dev = truncate(Stat, .dev, stat.dev),
.ino = truncate(Stat, .ino, stat.ino),
.nlink = clamp(Stat, .nlink, stat.nlink),
+ .hlinkc = stat.nlink > 1 and !std.os.system.S_ISDIR(stat.mode),
.dir = std.os.system.S_ISDIR(stat.mode),
.reg = std.os.system.S_ISREG(stat.mode),
.symlink = std.os.system.S_ISLNK(stat.mode),
@@ -103,27 +105,20 @@ fn writeJsonString(wr: anytype, s: []const u8) !void {
// ctx.pushPath(name)
// ctx.stat = ..;
// ctx.addSpecial() or ctx.addStat()
-// if (is_dir) {
-// // ctx.enterDir() is implicit in ctx.addStat() for directory entries.
+// if (ctx.stat.dir) {
// // repeat top-level steps for files in dir, recursively.
-// ctx.leaveDir();
// }
// ctx.popPath();
//
-// (Multithreaded scanning note: when scanning to RAM, we can support multiple
-// of these Contexts in parallel, just need to make sure to lock any access to
-// model.* related functions. Parallel scanning to a file will require major
-// changes to the export format or buffering with specially guided scanning to
-// avoid buffering /everything/... neither seems fun)
const Context = struct {
// When scanning to RAM
- parents: ?*model.Parents = null,
+ parents: ?model.Parents = null,
// When scanning to a file
- wr: ?std.io.BufferedWriter(4096, std.fs.File.Writer).Writer = null,
+ wr: ?*Writer = null,
path: std.ArrayList(u8) = std.ArrayList(u8).init(main.allocator),
path_indices: std.ArrayList(usize) = std.ArrayList(usize).init(main.allocator),
- items_seen: u32 = 1,
+ items_seen: u32 = 0,
// 0-terminated name of the top entry, points into 'path', invalid after popPath().
// This is a workaround for Zig's directory iterator not returning a [:0]const u8.
@@ -133,8 +128,31 @@ const Context = struct {
stat: Stat = undefined,
+ const Writer = std.io.BufferedWriter(4096, std.fs.File.Writer);
const Self = @This();
+ fn initFile(out: std.fs.File) !Self {
+ var buf = try main.allocator.create(Writer);
+ errdefer main.allocator.destroy(buf);
+ buf.* = std.io.bufferedWriter(out.writer());
+ var wr = buf.writer();
+ try wr.writeAll("[1,2,{\"progname\":\"ncdu\",\"progver\":\"" ++ main.program_version ++ "\",\"timestamp\":");
+ try wr.print("{d}", .{std.time.timestamp()});
+ try wr.writeByte('}');
+ return Self{ .wr = buf };
+ }
+
+ fn initMem() Self {
+ return Self{ .parents = model.Parents{} };
+ }
+
+ fn final(self: *Self) !void {
+ if (self.wr) |wr| {
+ try wr.writer().writeByte(']');
+ try wr.flush();
+ }
+ }
+
// Add the name of the file/dir entry we're currently inspecting
fn pushPath(self: *Self, name: []const u8) !void {
try self.path_indices.append(self.path.items.len);
@@ -145,14 +163,17 @@ const Context = struct {
try self.path.append(0);
self.name = self.path.items[start..self.path.items.len-1:0];
self.path.items.len -= 1;
-
- self.items_seen += 1;
- self.stat.dir = false; // used by addSpecial(); if we've failed to stat() then don't consider it a dir.
}
fn popPath(self: *Self) void {
self.path.items.len = self.path_indices.items[self.path_indices.items.len-1];
self.path_indices.items.len -= 1;
+
+ if (self.stat.dir) {
+ if (self.parents) |*p| if (p.top() != model.root) p.pop();
+ if (self.wr) |w| w.writer().writeByte(']') catch ui.die("Error writing to file.", .{});
+ } else
+ self.stat.dir = true; // repeated popPath()s mean we're closing parent dirs.
}
fn pathZ(self: *Self) [:0]const u8 {
@@ -162,19 +183,22 @@ const Context = struct {
// Set a flag to indicate that there was an error listing file entries in the current directory.
// (Such errors are silently ignored when exporting to a file, as the directory metadata has already been written)
fn setDirlistError(self: *Self) void {
- if (self.parents) |p| p.top().entry.set_err(p);
+ if (self.parents) |*p| p.top().entry.set_err(p);
}
const Special = enum { err, other_fs, kernfs, excluded };
// Insert the current path as a special entry (i.e. a file/dir that is not counted)
+ // Ignores self.stat except for the 'dir' option.
fn addSpecial(self: *Self, t: Special) !void {
+ std.debug.assert(self.items_seen > 0); // root item can't be a special
+
if (t == .err) {
if (self.last_error) |p| main.allocator.free(p);
self.last_error = try main.allocator.dupeZ(u8, self.path.items);
}
- if (self.parents) |p| {
+ if (self.parents) |*p| {
var e = try model.Entry.create(.file, false, self.name);
e.insert(p) catch unreachable;
var f = e.file().?;
@@ -184,9 +208,9 @@ const Context = struct {
.kernfs => f.kernfs = true,
.excluded => f.excluded = true,
}
- }
- if (self.wr) |w| {
+ } else if (self.wr) |wr| {
+ var w = wr.writer();
try w.writeAll(",\n");
if (self.stat.dir) try w.writeByte('[');
try w.writeAll("{\"name\":");
@@ -200,30 +224,37 @@ const Context = struct {
try w.writeByte('}');
if (self.stat.dir) try w.writeByte(']');
}
+ self.items_seen += 1;
}
// Insert current path as a counted file/dir/hardlink, with information from self.stat
fn addStat(self: *Self, dir_dev: u64) !void {
- if (self.parents) |p| {
+ if (self.parents) |*p| {
const etype = if (self.stat.dir) model.EType.dir
- else if (self.stat.nlink > 1) model.EType.link
+ else if (self.stat.hlinkc) model.EType.link
else model.EType.file;
var e = try model.Entry.create(etype, main.config.extended, self.name);
e.blocks = self.stat.blocks;
e.size = self.stat.size;
if (e.dir()) |d| d.dev = try model.getDevId(self.stat.dev);
if (e.file()) |f| f.notreg = !self.stat.dir and !self.stat.reg;
+ // TODO: Handle the scenario where we don't know the hard link count
+ // (i.e. on imports from old ncdu versions that don't have the "nlink" field)
if (e.link()) |l| {
l.ino = self.stat.ino;
l.nlink = self.stat.nlink;
}
if (e.ext()) |ext| ext.* = self.stat.ext;
- try e.insert(p);
- if (e.dir()) |d| try p.push(d); // Enter the directory
- }
+ if (self.items_seen == 0)
+ model.root = e.dir().?
+ else {
+ try e.insert(p);
+ if (e.dir()) |d| try p.push(d); // Enter the directory
+ }
- if (self.wr) |w| {
+ } else if (self.wr) |wr| {
+ var w = wr.writer();
try w.writeAll(",\n");
if (self.stat.dir) try w.writeByte('[');
try w.writeAll("{\"name\":");
@@ -231,23 +262,21 @@ const Context = struct {
if (self.stat.size > 0) try w.print(",\"asize\":{d}", .{ self.stat.size });
if (self.stat.blocks > 0) try w.print(",\"dsize\":{d}", .{ blocksToSize(self.stat.blocks) });
if (self.stat.dir and self.stat.dev != dir_dev) try w.print(",\"dev\":{d}", .{ self.stat.dev });
- if (!self.stat.dir and self.stat.nlink > 1) try w.print(",\"ino\":{d},\"hlnkc\":true,\"nlink\":{d}", .{ self.stat.ino, self.stat.nlink });
+ if (self.stat.hlinkc) try w.print(",\"ino\":{d},\"hlnkc\":true,\"nlink\":{d}", .{ self.stat.ino, self.stat.nlink });
if (!self.stat.dir and !self.stat.reg) try w.writeAll(",\"notreg\":true");
if (main.config.extended)
try w.print(",\"uid\":{d},\"gid\":{d},\"mode\":{d},\"mtime\":{d}",
.{ self.stat.ext.uid, self.stat.ext.gid, self.stat.ext.mode, self.stat.ext.mtime });
try w.writeByte('}');
}
- }
- fn leaveDir(self: *Self) !void {
- if (self.parents) |p| p.pop();
- if (self.wr) |w| try w.writeByte(']');
+ self.items_seen += 1;
}
fn deinit(self: *Self) void {
if (self.last_error) |p| main.allocator.free(p);
- if (self.parents) |p| p.deinit();
+ if (self.parents) |*p| p.deinit();
+ if (self.wr) |p| main.allocator.destroy(p);
self.path.deinit();
self.path_indices.deinit();
}
@@ -259,6 +288,7 @@ var active_context: ?*Context = null;
// Read and index entries of the given dir.
// (TODO: shouldn't error on OOM but instead call a function that waits or something)
fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) (std.fs.File.Writer.Error || std.mem.Allocator.Error)!void {
+ // XXX: The iterator allocates 8k+ bytes on the stack, may want to do heap allocation here?
var it = dir.iterate();
while(true) {
const entry = it.next() catch {
@@ -266,6 +296,7 @@ fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) (std.fs.File.Writer.Err
return;
} orelse break;
+ ctx.stat.dir = false;
try ctx.pushPath(entry.name);
defer ctx.popPath();
try main.handleEvent(false, false);
@@ -335,54 +366,426 @@ fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) (std.fs.File.Writer.Err
}
try ctx.addStat(dir_dev);
-
- if (ctx.stat.dir) {
- try scanDir(ctx, edir.?, ctx.stat.dev);
- try ctx.leaveDir();
- }
+ if (ctx.stat.dir) try scanDir(ctx, edir.?, ctx.stat.dev);
}
}
pub fn scanRoot(path: []const u8, out: ?std.fs.File) !void {
- const full_path = std.fs.realpathAlloc(main.allocator, path) catch path;
- defer main.allocator.free(full_path);
-
- var ctx = Context{};
- defer ctx.deinit();
- try ctx.pushPath(full_path);
+ var ctx = if (out) |f| try Context.initFile(f) else Context.initMem();
active_context = &ctx;
defer active_context = null;
+ defer ctx.deinit();
+
+ const full_path = std.fs.realpathAlloc(main.allocator, path) catch null;
+ defer if (full_path) |p| main.allocator.free(p);
+ try ctx.pushPath(full_path orelse path);
ctx.stat = try Stat.read(std.fs.cwd(), ctx.pathZ(), true);
if (!ctx.stat.dir) return error.NotADirectory;
+ try ctx.addStat(0);
- var parents = model.Parents{};
- var buf = if (out) |f| std.io.bufferedWriter(f.writer()) else undefined;
+ var dir = try std.fs.cwd().openDirZ(ctx.pathZ(), .{ .access_sub_paths = true, .iterate = true });
+ defer dir.close();
+ try scanDir(&ctx, dir, ctx.stat.dev);
+ ctx.popPath();
+ try ctx.final();
+}
- if (out) |f| {
- ctx.wr = buf.writer();
- try ctx.wr.?.writeAll("[1,2,{\"progname\":\"ncdu\",\"progver\":\"" ++ main.program_version ++ "\",\"timestamp\":");
- try ctx.wr.?.print("{d}", .{std.time.timestamp()});
- try ctx.wr.?.writeByte('}');
- try ctx.addStat(0);
+// Using a custom recursive descent JSON parser here. std.json is great, but
+// has two major downsides:
+// - It does strict UTF-8 validation. Which is great in general, but not so
+// much for ncdu dumps that may contain non-UTF-8 paths encoded as strings.
+// - The streaming parser requires complex and overly large buffering in order
+// to read strings, which doesn't work so well in our case.
+//
+// TODO: This code isn't very elegant and likely contains bugs. It may be
+// worth factoring out the JSON parts into a separate abstraction for which
+// tests can be written.
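+//
+// For reference, the input consumed here is the format written by
+// Context.initFile() and addStat() above: a version header, a metadata
+// object, and then one nested array per directory. A hand-written
+// illustration (field values are made up, and real "name" strings may
+// contain bytes that aren't valid UTF-8):
+//
+//   [1,2,{"progname":"ncdu","progver":"2.0","timestamp":1622278217},
+//   [{"name":"/some/dir","asize":4096,"dsize":4096,"dev":2049},
+//   {"name":"file.txt","asize":123,"dsize":4096},
+//   {"name":"link.bin","asize":456,"dsize":4096,"ino":1234,"hlnkc":true,"nlink":2},
+//   [{"name":"subdir","asize":4096,"dsize":4096},
+//   {"name":"nested.txt","asize":7,"dsize":4096}]]]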
+const Import = struct {
+ ctx: Context,
+ rd: std.io.BufferedReader(4096, std.fs.File.Reader),
+ ch: u8 = 0, // last read character, 0 = EOF (or invalid null byte, who cares)
+ byte: u64 = 1,
+ line: u64 = 1,
+ namebuf: [32*1024]u8 = undefined,
- } else {
- ctx.parents = &parents;
- model.root = (try model.Entry.create(.dir, false, full_path)).dir().?;
- model.root.entry.blocks = ctx.stat.blocks;
- model.root.entry.size = ctx.stat.size;
- model.root.dev = try model.getDevId(ctx.stat.dev);
- if (model.root.entry.ext()) |ext| ext.* = ctx.stat.ext;
+ const Self = @This();
+
+ fn die(self: *Self, str: []const u8) noreturn {
+ ui.die("Error importing file on line {}:{}: {s}.\n", .{ self.line, self.byte, str });
}
- var dir = try std.fs.cwd().openDirZ(ctx.pathZ(), .{ .access_sub_paths = true, .iterate = true });
- defer dir.close();
- try scanDir(&ctx, dir, ctx.stat.dev);
- if (out != null) {
- try ctx.leaveDir();
- try ctx.wr.?.writeByte(']');
- try buf.flush();
+ // Advance to the next byte, sets ch.
+ fn con(self: *Self) void {
+ // XXX: This indirection through a BufferedReader to just read 1 byte
+ // at a time may have some extra overhead. Wrapping our own LinearFifo
+ // may or may not be worth it, needs benchmarking.
+ self.ch = self.rd.reader().readByte() catch |e| switch (e) {
+ error.EndOfStream => 0,
+ error.InputOutput => self.die("I/O error"),
+ // TODO: This one can be retried
+ error.SystemResources => self.die("out of memory"),
+ else => unreachable,
+ };
+ self.byte += 1;
+ }
+
+ // Advance to the next non-whitespace byte.
+ fn conws(self: *Self) void {
+ while (true) {
+ switch (self.ch) {
+ '\n' => {
+ self.line += 1;
+ self.byte = 1;
+ },
+ ' ', '\t', '\r' => {},
+ else => break,
+ }
+ self.con();
+ }
+ }
+
+ // Returns the current byte and advances to the next.
+ fn next(self: *Self) u8 {
+ defer self.con();
+ return self.ch;
+ }
+
+ fn hexdig(self: *Self) u16 {
+ return switch (self.ch) {
+ '0'...'9' => self.next() - '0',
+ 'a'...'f' => self.next() - 'a' + 10,
+ 'A'...'F' => self.next() - 'A' + 10,
+ else => self.die("invalid hex digit"),
+ };
+ }
+
+ // Read a string into buf.
+ // Any characters beyond the size of the buffer are consumed but otherwise discarded.
+    // (May store fewer characters in the case of \u escapes; it's not super precise.)
+ fn string(self: *Self, buf: []u8) []u8 {
+ std.debug.assert(self.ch == '"');
+ if (self.next() != '"') self.die("expected '\"'");
+ var n: u64 = 0;
+ while (true) {
+ const ch = self.next();
+ switch (ch) {
+ '"' => break,
+ '\\' => switch (self.next()) {
+ '"' => if (n < buf.len) { buf[n] = '"'; n += 1; },
+ '\\'=> if (n < buf.len) { buf[n] = '\\';n += 1; },
+ '/' => if (n < buf.len) { buf[n] = '/'; n += 1; },
+ 'b' => if (n < buf.len) { buf[n] = 0x8; n += 1; },
+ 'f' => if (n < buf.len) { buf[n] = 0xc; n += 1; },
+ 'n' => if (n < buf.len) { buf[n] = 0xa; n += 1; },
+ 'r' => if (n < buf.len) { buf[n] = 0xd; n += 1; },
+ 't' => if (n < buf.len) { buf[n] = 0x9; n += 1; },
+ 'u' => {
+ const char = (self.hexdig()<<12) + (self.hexdig()<<8) + (self.hexdig()<<4) + self.hexdig();
+ if (n + 6 < buf.len)
+ n += std.unicode.utf8Encode(char, buf[n..n+5]) catch unreachable;
+ },
+ else => self.die("invalid escape sequence"),
+ },
+ 0x20, 0x21, 0x23...0x5b, 0x5d...0xff => if (n < buf.len) { buf[n] = ch; n += 1; },
+ else => self.die("invalid character in string"),
+ }
+ }
+ return buf[0..n];
+ }
+
+ fn uint(self: *Self, T: anytype) T {
+ if (self.ch == '0') {
+ self.con();
+ return 0;
+ }
+ var v: T = 0;
+ while (self.ch >= '0' and self.ch <= '9') {
+ const newv = v *% 10 +% (self.ch - '0');
+ if (newv < v) self.die("integer out of range");
+ v = newv;
+ self.con();
+ }
+ if (v == 0) self.die("expected number");
+ return v;
+ }
+
+ fn boolean(self: *Self) bool {
+ switch (self.next()) {
+ 't' => {
+ if (self.next() == 'r' and self.next() == 'u' and self.next() == 'e')
+ return true;
+ },
+ 'f' => {
+ if (self.next() == 'a' and self.next() == 'l' and self.next() == 's' and self.next() == 'e')
+ return false;
+ },
+ else => {}
+ }
+ self.die("expected boolean");
+ }
+
+ // Consume and discard any JSON value.
+ fn conval(self: *Self) void {
+ switch (self.ch) {
+ 't' => _ = self.boolean(),
+ 'f' => _ = self.boolean(),
+ 'n' => {
+ self.con();
+ if (!(self.next() == 'u' and self.next() == 'l' and self.next() == 'l'))
+ self.die("invalid JSON value");
+ },
+ '"' => _ = self.string(&[0]u8{}),
+ '{' => {
+ self.con();
+ self.conws();
+ if (self.ch == '}') { self.con(); return; }
+ while (true) {
+ self.conws();
+ _ = self.string(&[0]u8{});
+ self.conws();
+ if (self.next() != ':') self.die("expected ':'");
+ self.conws();
+ self.conval();
+ self.conws();
+ switch (self.next()) {
+ ',' => continue,
+ '}' => break,
+ else => self.die("expected ',' or '}'"),
+ }
+ }
+ },
+ '[' => {
+ self.con();
+ self.conws();
+ if (self.ch == ']') { self.con(); return; }
+ while (true) {
+ self.conws();
+ self.conval();
+ self.conws();
+ switch (self.next()) {
+ ',' => continue,
+ ']' => break,
+ else => self.die("expected ',' or ']'"),
+ }
+ }
+ },
+ '-', '0'...'9' => {
+ self.con();
+                // Numbers are kind of annoying; this "parsing" is invalid and ultra-lazy.
+ while (true) {
+ switch (self.ch) {
+ '-', '+', 'e', 'E', '.', '0'...'9' => self.con(),
+ else => return,
+ }
+ }
+ },
+ else => self.die("invalid JSON value"),
+ }
}
+
+ fn itemkey(self: *Self, key: []const u8, name: *?[]u8, special: *?Context.Special) void {
+ const eq = std.mem.eql;
+ switch (if (key.len > 0) key[0] else @as(u8,0)) {
+ 'a' => {
+ if (eq(u8, key, "asize")) {
+ self.ctx.stat.size = self.uint(u64);
+ return;
+ }
+ },
+ 'd' => {
+ if (eq(u8, key, "dsize")) {
+ self.ctx.stat.blocks = @intCast(u61, self.uint(u64)>>9);
+ return;
+ }
+ if (eq(u8, key, "dev")) {
+ self.ctx.stat.dev = self.uint(u64);
+ return;
+ }
+ },
+ 'e' => {
+ if (eq(u8, key, "excluded")) {
+ var buf: [32]u8 = undefined;
+ const typ = self.string(&buf);
+ // "frmlnk" is also possible, but currently considered equivalent to "pattern".
+ if (eq(u8, typ, "otherfs")) special.* = .other_fs
+ else if (eq(u8, typ, "kernfs")) special.* = .kernfs
+ else special.* = .excluded;
+ }
+ },
+ 'g' => {
+ if (eq(u8, key, "gid")) {
+ self.ctx.stat.ext.gid = self.uint(u32);
+ return;
+ }
+ },
+ 'h' => {
+ if (eq(u8, key, "hlinkc")) {
+ self.ctx.stat.hlinkc = true;
+ return;
+ }
+ },
+ 'i' => {
+ if (eq(u8, key, "ino")) {
+ self.ctx.stat.ino = self.uint(u64);
+ return;
+ }
+ },
+ 'm' => {
+ if (eq(u8, key, "mode")) {
+ self.ctx.stat.ext.mode = self.uint(u16);
+ return;
+ }
+ if (eq(u8, key, "mtime")) {
+ self.ctx.stat.ext.mtime = self.uint(u64);
+ // Accept decimal numbers, but discard the fractional part because our data model doesn't support it.
+ if (self.ch == '.') {
+ self.con();
+ while (self.ch >= '0' and self.ch <= '9')
+ self.con();
+ }
+ return;
+ }
+ },
+ 'n' => {
+ if (eq(u8, key, "name")) {
+ if (name.* != null) self.die("duplicate key");
+ name.* = self.string(&self.namebuf);
+ if (name.*.?.len > self.namebuf.len-5) self.die("too long file name");
+ return;
+ }
+ if (eq(u8, key, "nlink")) {
+ self.ctx.stat.nlink = self.uint(u32);
+ if (!self.ctx.stat.dir and self.ctx.stat.nlink > 1)
+ self.ctx.stat.hlinkc = true;
+ return;
+ }
+ if (eq(u8, key, "notreg")) {
+ self.ctx.stat.reg = !self.boolean();
+ return;
+ }
+ },
+ 'r' => {
+ if (eq(u8, key, "read_error")) {
+ if (self.boolean())
+ special.* = .err;
+ return;
+ }
+ },
+ 'u' => {
+ if (eq(u8, key, "uid")) {
+ self.ctx.stat.ext.uid = self.uint(u32);
+ return;
+ }
+ },
+ else => {},
+ }
+ self.conval();
+ }
+
+ fn iteminfo(self: *Self, dir_dev: u64) void {
+ if (self.next() != '{') self.die("expected '{'");
+ self.ctx.stat.dev = dir_dev;
+ var name: ?[]u8 = null;
+ var special: ?Context.Special = null;
+ while (true) {
+ self.conws();
+ var keybuf: [32]u8 = undefined;
+ const key = self.string(&keybuf);
+ self.conws();
+ if (self.next() != ':') self.die("expected ':'");
+ self.conws();
+ self.itemkey(key, &name, &special);
+ self.conws();
+ switch (self.next()) {
+ ',' => continue,
+ '}' => break,
+ else => self.die("expected ',' or '}'"),
+ }
+ }
+ if (name) |n| self.ctx.pushPath(n) catch unreachable
+ else self.die("missing \"name\" field");
+ if (special) |s| self.ctx.addSpecial(s) catch unreachable
+ else self.ctx.addStat(dir_dev) catch unreachable;
+ }
+
+ fn item(self: *Self, dev: u64) void {
+ self.ctx.stat = .{};
+ if (self.ch == '[') {
+ self.ctx.stat.dir = true;
+ self.con();
+ self.conws();
+ }
+
+ self.iteminfo(dev);
+
+ self.conws();
+ if (self.ctx.stat.dir) {
+ const ndev = self.ctx.stat.dev;
+ while (self.ch == ',') {
+ self.con();
+ self.conws();
+ self.item(ndev);
+ self.conws();
+ }
+ if (self.next() != ']') self.die("expected ',' or ']'");
+ }
+ self.ctx.popPath();
+
+ if ((self.ctx.items_seen & 1023) == 0)
+ main.handleEvent(false, false) catch unreachable;
+ }
+
+ fn root(self: *Self) void {
+ self.con();
+ self.conws();
+ if (self.next() != '[') self.die("expected '['");
+ self.conws();
+ if (self.uint(u16) != 1) self.die("incompatible major format version");
+ self.conws();
+ if (self.next() != ',') self.die("expected ','");
+ self.conws();
+ _ = self.uint(u16); // minor version, ignored for now
+ self.conws();
+ if (self.next() != ',') self.die("expected ','");
+ self.conws();
+ // metadata object
+ if (self.ch != '{') self.die("expected '{'");
+ self.conval(); // completely discarded
+ self.conws();
+ if (self.next() != ',') self.die("expected ','");
+ self.conws();
+ // root element
+ if (self.ch != '[') self.die("expected '['"); // top-level entry must be a dir
+ self.item(0);
+ self.conws();
+ // any trailing elements
+ while (self.ch == ',') {
+ self.con();
+ self.conws();
+ self.conval();
+ self.conws();
+ }
+ if (self.next() != ']') self.die("expected ',' or ']'");
+ self.conws();
+ if (self.ch != 0) self.die("trailing garbage");
+ }
+};
+
+pub fn importRoot(path: [:0]const u8, out: ?std.fs.File) !void {
+ var fd = if (std.mem.eql(u8, "-", path)) std.io.getStdIn()
+ else try std.fs.cwd().openFileZ(path, .{});
+ defer fd.close();
+
+ var imp = Import{
+ .ctx = if (out) |f| try Context.initFile(f) else Context.initMem(),
+ .rd = std.io.bufferedReader(fd.reader()),
+ };
+ active_context = &imp.ctx;
+ defer active_context = null;
+ defer imp.ctx.deinit();
+ imp.root();
+ try imp.ctx.final();
}
var animation_pos: u32 = 0;
@@ -474,7 +877,7 @@ pub fn draw() !void {
}
}
-pub fn key(ch: i32) !void {
+pub fn keyInput(ch: i32) !void {
if (need_confirm_quit) {
switch (ch) {
'y', 'Y' => if (need_confirm_quit) ui.quit(),
diff --git a/src/ui.zig b/src/ui.zig
index 73183f2..4a2f761 100644
--- a/src/ui.zig
+++ b/src/ui.zig
@@ -285,7 +285,6 @@ pub fn init() void {
if (term == null) die("Error initializing ncurses.\n", .{});
_ = c.set_term(term);
} else {
- if (!std.io.getStdIn().isTty()) die("Standard input is not a TTY. Did you mean to import a file using '-f -'?\n", .{});
if (c.initscr() == null) die("Error initializing ncurses.\n", .{});
}
updateSize();