diff --git a/build.zig b/build.zig
index 73d8ba4af147..847303340deb 100644
--- a/build.zig
+++ b/build.zig
@@ -235,7 +235,7 @@ pub fn build(b: *std.Build) !void {
         },
         2 => {
             // Untagged development build (e.g. 0.10.0-dev.2025+ecf0050a9).
-            var it = mem.split(u8, git_describe, "-");
+            var it = mem.splitScalar(u8, git_describe, '-');
             const tagged_ancestor = it.first();
             const commit_height = it.next().?;
             const commit_id = it.next().?;
@@ -280,7 +280,7 @@ pub fn build(b: *std.Build) !void {
     // That means we also have to rely on stage1 compiled c++ files. We parse config.h to find
     // the information passed on to us from cmake.
     if (cfg.cmake_prefix_path.len > 0) {
-        var it = mem.tokenize(u8, cfg.cmake_prefix_path, ";");
+        var it = mem.tokenizeScalar(u8, cfg.cmake_prefix_path, ';');
        while (it.next()) |path| {
            b.addSearchPrefix(path);
        }
@@ -682,7 +682,7 @@ fn addCxxKnownPath(
     if (!std.process.can_spawn)
         return error.RequiredLibraryNotFound;
     const path_padded = b.exec(&.{ ctx.cxx_compiler, b.fmt("-print-file-name={s}", .{objname}) });
-    var tokenizer = mem.tokenize(u8, path_padded, "\r\n");
+    var tokenizer = mem.tokenizeAny(u8, path_padded, "\r\n");
     const path_unpadded = tokenizer.next().?;
     if (mem.eql(u8, path_unpadded, objname)) {
         if (errtxt) |msg| {
@@ -705,7 +705,7 @@ fn addCxxKnownPath(
 }
 
 fn addCMakeLibraryList(exe: *std.Build.Step.Compile, list: []const u8) void {
-    var it = mem.tokenize(u8, list, ";");
+    var it = mem.tokenizeScalar(u8, list, ';');
     while (it.next()) |lib| {
         if (mem.startsWith(u8, lib, "-l")) {
             exe.linkSystemLibrary(lib["-l".len..]);
@@ -850,18 +850,18 @@ fn parseConfigH(b: *std.Build, config_h_text: []const u8) ?CMakeConfig {
         // .prefix = ZIG_LLVM_LINK_MODE parsed manually below
     };
 
-    var lines_it = mem.tokenize(u8, config_h_text, "\r\n");
+    var lines_it = mem.tokenizeAny(u8, config_h_text, "\r\n");
     while (lines_it.next()) |line| {
         inline for (mappings) |mapping| {
             if (mem.startsWith(u8, line, mapping.prefix)) {
-                var it = mem.split(u8, line, "\"");
+                var it = mem.splitScalar(u8, line, '"');
                 _ = it.first(); // skip the stuff before the quote
                 const quoted = it.next().?; // the stuff inside the quote
                 @field(ctx, mapping.field) = toNativePathSep(b, quoted);
             }
         }
         if (mem.startsWith(u8, line, "#define ZIG_LLVM_LINK_MODE ")) {
-            var it = mem.split(u8, line, "\"");
+            var it = mem.splitScalar(u8, line, '"');
             _ = it.next().?; // skip the stuff before the quote
             const quoted = it.next().?; // the stuff inside the quote
             ctx.llvm_linkage = if (mem.eql(u8, quoted, "shared")) .dynamic else .static;
diff --git a/doc/docgen.zig b/doc/docgen.zig
index a49e4f549023..bdbde6f5d241 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -1223,7 +1223,7 @@ fn printShell(out: anytype, shell_content: []const u8, escape: bool) !void {
     const trimmed_shell_content = mem.trim(u8, shell_content, " \n");
     try out.writeAll("<figure><figcaption class=\"shell-cap\">Shell</figcaption><pre><samp>");
     var cmd_cont: bool = false;
-    var iter = std.mem.split(u8, trimmed_shell_content, "\n");
+    var iter = std.mem.splitScalar(u8, trimmed_shell_content, '\n');
     while (iter.next()) |orig_line| {
         const line = mem.trimRight(u8, orig_line, " ");
         if (!cmd_cont and line.len > 1 and mem.eql(u8, line[0..2], "$ ") and line[line.len - 1] != '\\') {
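
The `split(u8, s, "\n")` to `splitScalar(u8, s, '\n')` rewrites above are intended to be behavior-preserving: a one-byte string delimiter becomes a scalar delimiter. A minimal sketch of the expected semantics (illustration only, not part of the patch):

```zig
const std = @import("std");

test "splitScalar keeps the old split semantics for one-byte delimiters" {
    var it = std.mem.splitScalar(u8, "one\ntwo\n\nthree", '\n');
    try std.testing.expectEqualStrings("one", it.first());
    try std.testing.expectEqualStrings("two", it.next().?);
    // Unlike the tokenize* family, split* preserves empty fields between delimiters.
    try std.testing.expectEqualStrings("", it.next().?);
    try std.testing.expectEqualStrings("three", it.next().?);
    try std.testing.expect(it.next() == null);
}
```
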
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index bf0c74bd648e..6ea7153c0d1d 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -1388,7 +1388,7 @@ pub fn findProgram(self: *Build, names: []const []const u8, paths: []const []con
             if (fs.path.isAbsolute(name)) {
                 return name;
             }
-            var it = mem.tokenize(u8, PATH, &[_]u8{fs.path.delimiter});
+            var it = mem.tokenizeScalar(u8, PATH, fs.path.delimiter);
             while (it.next()) |path| {
                 const full_path = self.pathJoin(&.{
                     path,
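
For PATH-style lookups the tokenizing variant fits because it never yields empty entries. A rough sketch of that behavior under the new API (illustration only, not part of the patch):

```zig
const std = @import("std");

test "tokenizeScalar skips empty PATH entries" {
    // Doubled or trailing delimiters such as "::" produce no empty tokens.
    var it = std.mem.tokenizeScalar(u8, "/usr/bin::/usr/local/bin:", ':');
    try std.testing.expectEqualStrings("/usr/bin", it.next().?);
    try std.testing.expectEqualStrings("/usr/local/bin", it.next().?);
    try std.testing.expect(it.next() == null);
}
```
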
diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig
index de68ef6644f6..a1518f7c6a16 100644
--- a/lib/std/Build/Cache.zig
+++ b/lib/std/Build/Cache.zig
@@ -438,7 +438,7 @@ pub const Manifest = struct {
 
             const input_file_count = self.files.items.len;
             var any_file_changed = false;
-            var line_iter = mem.tokenize(u8, file_contents, "\n");
+            var line_iter = mem.tokenizeScalar(u8, file_contents, '\n');
             var idx: usize = 0;
             if (if (line_iter.next()) |line| !std.mem.eql(u8, line, manifest_header) else true) {
                 if (try self.upgradeToExclusiveLock()) continue;
@@ -467,7 +467,7 @@ pub const Manifest = struct {
                     break :blk new;
                 };
 
-                var iter = mem.tokenize(u8, line, " ");
+                var iter = mem.tokenizeScalar(u8, line, ' ');
                 const size = iter.next() orelse return error.InvalidFormat;
                 const inode = iter.next() orelse return error.InvalidFormat;
                 const mtime_nsec_str = iter.next() orelse return error.InvalidFormat;
diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig
index c77dc3de3664..24ebfef3881b 100644
--- a/lib/std/Build/Step/CheckObject.zig
+++ b/lib/std/Build/Step/CheckObject.zig
@@ -103,8 +103,8 @@ const Action = struct {
         assert(act.tag == .match or act.tag == .not_present);
         const phrase = act.phrase.resolve(b, step);
         var candidate_var: ?struct { name: []const u8, value: u64 } = null;
-        var hay_it = mem.tokenize(u8, mem.trim(u8, haystack, " "), " ");
-        var needle_it = mem.tokenize(u8, mem.trim(u8, phrase, " "), " ");
+        var hay_it = mem.tokenizeScalar(u8, mem.trim(u8, haystack, " "), ' ');
+        var needle_it = mem.tokenizeScalar(u8, mem.trim(u8, phrase, " "), ' ');
 
         while (needle_it.next()) |needle_tok| {
             const hay_tok = hay_it.next() orelse return false;
@@ -155,7 +155,7 @@ const Action = struct {
         var op_stack = std.ArrayList(enum { add, sub, mod, mul }).init(gpa);
         var values = std.ArrayList(u64).init(gpa);
 
-        var it = mem.tokenize(u8, phrase, " ");
+        var it = mem.tokenizeScalar(u8, phrase, ' ');
         while (it.next()) |next| {
             if (mem.eql(u8, next, "+")) {
                 try op_stack.append(.add);
@@ -365,7 +365,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
     var vars = std.StringHashMap(u64).init(gpa);
 
     for (self.checks.items) |chk| {
-        var it = mem.tokenize(u8, output, "\r\n");
+        var it = mem.tokenizeAny(u8, output, "\r\n");
         for (chk.actions.items) |act| {
             switch (act.tag) {
                 .match => {
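
Note that `tokenize(u8, output, "\r\n")` maps to `tokenizeAny`, not `tokenizeScalar`: the old `tokenize` always treated its delimiter argument as a set of single items, so `"\r\n"` means "either CR or LF". A small sketch of that behavior (illustration only, not part of the patch):

```zig
const std = @import("std");

test "tokenizeAny treats the delimiter string as a set, not a sequence" {
    // Both CRLF and bare LF line endings are consumed as delimiters.
    var it = std.mem.tokenizeAny(u8, "first\r\nsecond\nthird\r\n", "\r\n");
    try std.testing.expectEqualStrings("first", it.next().?);
    try std.testing.expectEqualStrings("second", it.next().?);
    try std.testing.expectEqualStrings("third", it.next().?);
    try std.testing.expect(it.next() == null);
}
```
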
diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig
index 3be40434cb10..7d8647a1cc70 100644
--- a/lib/std/Build/Step/Compile.zig
+++ b/lib/std/Build/Step/Compile.zig
@@ -853,7 +853,7 @@ fn runPkgConfig(self: *Compile, lib_name: []const u8) ![]const []const u8 {
     var zig_args = ArrayList([]const u8).init(b.allocator);
     defer zig_args.deinit();
 
-    var it = mem.tokenize(u8, stdout, " \r\n\t");
+    var it = mem.tokenizeAny(u8, stdout, " \r\n\t");
     while (it.next()) |tok| {
         if (mem.eql(u8, tok, "-I")) {
             const dir = it.next() orelse return error.PkgConfigInvalidOutput;
@@ -2101,10 +2101,10 @@ fn execPkgConfigList(self: *std.Build, out_code: *u8) (PkgConfigError || ExecErr
     const stdout = try self.execAllowFail(&[_][]const u8{ "pkg-config", "--list-all" }, out_code, .Ignore);
     var list = ArrayList(PkgConfigPkg).init(self.allocator);
     errdefer list.deinit();
-    var line_it = mem.tokenize(u8, stdout, "\r\n");
+    var line_it = mem.tokenizeAny(u8, stdout, "\r\n");
     while (line_it.next()) |line| {
         if (mem.trim(u8, line, " \t").len == 0) continue;
-        var tok_it = mem.tokenize(u8, line, " \t");
+        var tok_it = mem.tokenizeAny(u8, line, " \t");
         try list.append(PkgConfigPkg{
             .name = tok_it.next() orelse return error.PkgConfigInvalidOutput,
             .desc = tok_it.rest(),
@@ -2224,7 +2224,7 @@ fn checkCompileErrors(self: *Compile) !void {
     // Render the expected lines into a string that we can compare verbatim.
     var expected_generated = std.ArrayList(u8).init(arena);
 
-    var actual_line_it = mem.split(u8, actual_stderr, "\n");
+    var actual_line_it = mem.splitScalar(u8, actual_stderr, '\n');
     for (self.expect_errors) |expect_line| {
         const actual_line = actual_line_it.next() orelse {
             try expected_generated.appendSlice(expect_line);
diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig
index f6939e0e38ae..4b76e24b26e6 100644
--- a/lib/std/Build/Step/ConfigHeader.zig
+++ b/lib/std/Build/Step/ConfigHeader.zig
@@ -250,14 +250,14 @@ fn render_autoconf(
 
     var any_errors = false;
     var line_index: u32 = 0;
-    var line_it = std.mem.split(u8, contents, "\n");
+    var line_it = std.mem.splitScalar(u8, contents, '\n');
     while (line_it.next()) |line| : (line_index += 1) {
         if (!std.mem.startsWith(u8, line, "#")) {
             try output.appendSlice(line);
             try output.appendSlice("\n");
             continue;
         }
-        var it = std.mem.tokenize(u8, line[1..], " \t\r");
+        var it = std.mem.tokenizeAny(u8, line[1..], " \t\r");
         const undef = it.next().?;
         if (!std.mem.eql(u8, undef, "undef")) {
             try output.appendSlice(line);
@@ -297,14 +297,14 @@ fn render_cmake(
 
     var any_errors = false;
     var line_index: u32 = 0;
-    var line_it = std.mem.split(u8, contents, "\n");
+    var line_it = std.mem.splitScalar(u8, contents, '\n');
     while (line_it.next()) |line| : (line_index += 1) {
         if (!std.mem.startsWith(u8, line, "#")) {
             try output.appendSlice(line);
             try output.appendSlice("\n");
             continue;
         }
-        var it = std.mem.tokenize(u8, line[1..], " \t\r");
+        var it = std.mem.tokenizeAny(u8, line[1..], " \t\r");
         const cmakedefine = it.next().?;
         if (!std.mem.eql(u8, cmakedefine, "cmakedefine") and
             !std.mem.eql(u8, cmakedefine, "cmakedefine01"))
diff --git a/lib/std/SemanticVersion.zig b/lib/std/SemanticVersion.zig
index 26f6f581c83b..4d505b4e30f1 100644
--- a/lib/std/SemanticVersion.zig
+++ b/lib/std/SemanticVersion.zig
@@ -42,8 +42,8 @@ pub fn order(lhs: Version, rhs: Version) std.math.Order {
     if (lhs.pre == null and rhs.pre != null) return .gt;
 
     // Iterate over pre-release identifiers until a difference is found.
-    var lhs_pre_it = std.mem.split(u8, lhs.pre.?, ".");
-    var rhs_pre_it = std.mem.split(u8, rhs.pre.?, ".");
+    var lhs_pre_it = std.mem.splitScalar(u8, lhs.pre.?, '.');
+    var rhs_pre_it = std.mem.splitScalar(u8, rhs.pre.?, '.');
     while (true) {
         const next_lid = lhs_pre_it.next();
         const next_rid = rhs_pre_it.next();
@@ -86,7 +86,7 @@ pub fn parse(text: []const u8) !Version {
     // Parse the required major, minor, and patch numbers.
     const extra_index = std.mem.indexOfAny(u8, text, "-+");
     const required = text[0..(extra_index orelse text.len)];
-    var it = std.mem.split(u8, required, ".");
+    var it = std.mem.splitScalar(u8, required, '.');
     var ver = Version{
         .major = try parseNum(it.first()),
         .minor = try parseNum(it.next() orelse return error.InvalidVersion),
@@ -108,7 +108,7 @@ pub fn parse(text: []const u8) !Version {
     // Check validity of optional pre-release identifiers.
     // See: https://semver.org/#spec-item-9
     if (ver.pre) |pre| {
-        it = std.mem.split(u8, pre, ".");
+        it = std.mem.splitScalar(u8, pre, '.');
         while (it.next()) |id| {
             // Identifiers MUST NOT be empty.
             if (id.len == 0) return error.InvalidVersion;
@@ -127,7 +127,7 @@ pub fn parse(text: []const u8) !Version {
     // Check validity of optional build metadata identifiers.
     // See: https://semver.org/#spec-item-10
     if (ver.build) |build| {
-        it = std.mem.split(u8, build, ".");
+        it = std.mem.splitScalar(u8, build, '.');
         while (it.next()) |id| {
             // Identifiers MUST NOT be empty.
             if (id.len == 0) return error.InvalidVersion;
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 710aaefd5a08..ec69270d1520 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -531,7 +531,7 @@ pub const Version = struct {
         // found no digits or '.' before unexpected character
         if (end == 0) return error.InvalidVersion;
 
-        var it = std.mem.split(u8, text[0..end], ".");
+        var it = std.mem.splitScalar(u8, text[0..end], '.');
         // substring is not empty, first call will succeed
         const major = it.first();
         if (major.len == 0) return error.InvalidVersion;
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 12cc06f39789..7cf0f4681a6d 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -850,7 +850,7 @@ pub const ChildProcess = struct {
                     return original_err;
                 }
 
-                var it = mem.tokenize(u16, PATH, &[_]u16{';'});
+                var it = mem.tokenizeScalar(u16, PATH, ';');
                 while (it.next()) |search_path| {
                     dir_buf.clearRetainingCapacity();
                     try dir_buf.appendSlice(self.allocator, search_path);
@@ -1064,7 +1064,7 @@ fn windowsCreateProcessPathExt(
     // Now we know that at least *a* file matching the wildcard exists, we can loop
     // through PATHEXT in order and exec any that exist
 
-    var ext_it = mem.tokenize(u16, pathext, &[_]u16{';'});
+    var ext_it = mem.tokenizeScalar(u16, pathext, ';');
     while (ext_it.next()) |ext| {
         if (!windowsCreateProcessSupportsExtension(ext)) continue;
 
diff --git a/lib/std/crypto/Certificate.zig b/lib/std/crypto/Certificate.zig
index 063f1d8077fa..b8dc636693c0 100644
--- a/lib/std/crypto/Certificate.zig
+++ b/lib/std/crypto/Certificate.zig
@@ -337,8 +337,8 @@ pub const Parsed = struct {
             return true; // exact match
         }
 
-        var it_host = std.mem.split(u8, host_name, ".");
-        var it_dns = std.mem.split(u8, dns_name, ".");
+        var it_host = std.mem.splitScalar(u8, host_name, '.');
+        var it_dns = std.mem.splitScalar(u8, dns_name, '.');
 
         const len_match = while (true) {
             const host = it_host.next();
diff --git a/lib/std/crypto/phc_encoding.zig b/lib/std/crypto/phc_encoding.zig
index cc0f10e395c4..1eeee39a5a10 100644
--- a/lib/std/crypto/phc_encoding.zig
+++ b/lib/std/crypto/phc_encoding.zig
@@ -7,9 +7,12 @@ const mem = std.mem;
 const meta = std.meta;
 
 const fields_delimiter = "$";
+const fields_delimiter_scalar = '$';
 const version_param_name = "v";
 const params_delimiter = ",";
+const params_delimiter_scalar = ',';
 const kv_delimiter = "=";
+const kv_delimiter_scalar = '=';
 
 pub const Error = std.crypto.errors.EncodingError || error{NoSpaceLeft};
 
@@ -73,7 +76,7 @@ pub fn BinValue(comptime max_len: usize) type {
 /// Other fields will also be deserialized from the function parameters section.
 pub fn deserialize(comptime HashResult: type, str: []const u8) Error!HashResult {
     var out = mem.zeroes(HashResult);
-    var it = mem.split(u8, str, fields_delimiter);
+    var it = mem.splitScalar(u8, str, fields_delimiter_scalar);
     var set_fields: usize = 0;
 
     while (true) {
@@ -104,7 +107,7 @@ pub fn deserialize(comptime HashResult: type, str: []const u8) Error!HashResult
 
         // Read optional parameters
         var has_params = false;
-        var it_params = mem.split(u8, field, params_delimiter);
+        var it_params = mem.splitScalar(u8, field, params_delimiter_scalar);
         while (it_params.next()) |params| {
             const param = kvSplit(params) catch break;
             var found = false;
@@ -252,7 +255,7 @@ fn serializeTo(params: anytype, out: anytype) !void {
 
 // Split a `key=value` string into `key` and `value`
 fn kvSplit(str: []const u8) !struct { key: []const u8, value: []const u8 } {
-    var it = mem.split(u8, str, kv_delimiter);
+    var it = mem.splitScalar(u8, str, kv_delimiter_scalar);
     const key = it.first();
     const value = it.next() orelse return Error.InvalidEncoding;
     const ret = .{ .key = key, .value = value };
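
The module keeps the string constants for serialization and adds `_scalar` twins for the split calls. A sketch of the '$'-field iteration the deserializer builds on (illustration only; the PHC string below is a made-up example):

```zig
const std = @import("std");

test "splitting a PHC-style string on '$'" {
    var it = std.mem.splitScalar(u8, "$argon2id$v=19$m=65536,t=3,p=4", '$');
    // The leading '$' yields an empty first field.
    try std.testing.expectEqualStrings("", it.first());
    try std.testing.expectEqualStrings("argon2id", it.next().?);
    try std.testing.expectEqualStrings("v=19", it.next().?);
    try std.testing.expectEqualStrings("m=65536,t=3,p=4", it.next().?);
    try std.testing.expect(it.next() == null);
}
```
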
diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig
index 077de3b5108c..b8e8ef55e2de 100644
--- a/lib/std/crypto/scrypt.zig
+++ b/lib/std/crypto/scrypt.zig
@@ -287,7 +287,7 @@ const crypt_format = struct {
         out.r = try Codec.intDecode(u30, str[4..9]);
         out.p = try Codec.intDecode(u30, str[9..14]);
 
-        var it = mem.split(u8, str[14..], "$");
+        var it = mem.splitScalar(u8, str[14..], '$');
 
         const salt = it.first();
         if (@hasField(T, "salt")) out.salt = salt;
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 746bfde3839e..037f0c81f6bc 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -3021,7 +3021,7 @@ pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 {
             } else if (argv0.len != 0) {
                 // argv[0] is not empty (and not a path): search it inside PATH
                 const PATH = std.os.getenvZ("PATH") orelse return error.FileNotFound;
-                var path_it = mem.tokenize(u8, PATH, &[_]u8{path.delimiter});
+                var path_it = mem.tokenizeScalar(u8, PATH, path.delimiter);
                 while (path_it.next()) |a_path| {
                     var resolved_path_buf: [MAX_PATH_BYTES - 1:0]u8 = undefined;
                     const resolved_path = std.fmt.bufPrintZ(&resolved_path_buf, "{s}/{s}", .{
diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig
index 4c320ae5cf12..e7a28a7615f5 100644
--- a/lib/std/fs/path.zig
+++ b/lib/std/fs/path.zig
@@ -358,7 +358,7 @@ pub fn windowsParsePath(path: []const u8) WindowsPath {
                 return relative_path;
             }
 
-            var it = mem.tokenize(u8, path, &[_]u8{this_sep});
+            var it = mem.tokenizeScalar(u8, path, this_sep);
             _ = (it.next() orelse return relative_path);
             _ = (it.next() orelse return relative_path);
             return WindowsPath{
@@ -420,8 +420,8 @@ fn networkShareServersEql(ns1: []const u8, ns2: []const u8) bool {
     const sep1 = ns1[0];
     const sep2 = ns2[0];
 
-    var it1 = mem.tokenize(u8, ns1, &[_]u8{sep1});
-    var it2 = mem.tokenize(u8, ns2, &[_]u8{sep2});
+    var it1 = mem.tokenizeScalar(u8, ns1, sep1);
+    var it2 = mem.tokenizeScalar(u8, ns2, sep2);
 
     // TODO ASCII is wrong, we actually need full unicode support to compare paths.
     return ascii.eqlIgnoreCase(it1.next().?, it2.next().?);
@@ -441,8 +441,8 @@ fn compareDiskDesignators(kind: WindowsPath.Kind, p1: []const u8, p2: []const u8
             const sep1 = p1[0];
             const sep2 = p2[0];
 
-            var it1 = mem.tokenize(u8, p1, &[_]u8{sep1});
-            var it2 = mem.tokenize(u8, p2, &[_]u8{sep2});
+            var it1 = mem.tokenizeScalar(u8, p1, sep1);
+            var it2 = mem.tokenizeScalar(u8, p2, sep2);
 
             // TODO ASCII is wrong, we actually need full unicode support to compare paths.
             return ascii.eqlIgnoreCase(it1.next().?, it2.next().?) and ascii.eqlIgnoreCase(it1.next().?, it2.next().?);
@@ -535,7 +535,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
                 break :l disk_designator.len;
             },
             .NetworkShare => {
-                var it = mem.tokenize(u8, paths[first_index], "/\\");
+                var it = mem.tokenizeAny(u8, paths[first_index], "/\\");
                 const server_name = it.next().?;
                 const other_name = it.next().?;
 
@@ -570,7 +570,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
         if (!correct_disk_designator) {
             continue;
         }
-        var it = mem.tokenize(u8, p[parsed.disk_designator.len..], "/\\");
+        var it = mem.tokenizeAny(u8, p[parsed.disk_designator.len..], "/\\");
         while (it.next()) |component| {
             if (mem.eql(u8, component, ".")) {
                 continue;
@@ -657,7 +657,7 @@ pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) Allocator.E
             negative_count = 0;
             result.clearRetainingCapacity();
         }
-        var it = mem.tokenize(u8, p, "/");
+        var it = mem.tokenizeScalar(u8, p, '/');
         while (it.next()) |component| {
             if (mem.eql(u8, component, ".")) {
                 continue;
@@ -1078,8 +1078,8 @@ pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) !
         return resolved_to;
     }
 
-    var from_it = mem.tokenize(u8, resolved_from, "/\\");
-    var to_it = mem.tokenize(u8, resolved_to, "/\\");
+    var from_it = mem.tokenizeAny(u8, resolved_from, "/\\");
+    var to_it = mem.tokenizeAny(u8, resolved_to, "/\\");
     while (true) {
         const from_component = from_it.next() orelse return allocator.dupe(u8, to_it.rest());
         const to_rest = to_it.rest();
@@ -1102,7 +1102,7 @@ pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) !
             result_index += 3;
         }
 
-        var rest_it = mem.tokenize(u8, to_rest, "/\\");
+        var rest_it = mem.tokenizeAny(u8, to_rest, "/\\");
         while (rest_it.next()) |to_component| {
             result[result_index] = '\\';
             result_index += 1;
@@ -1124,8 +1124,8 @@ pub fn relativePosix(allocator: Allocator, from: []const u8, to: []const u8) ![]
     const resolved_to = try resolvePosix(allocator, &[_][]const u8{ cwd, to });
     defer allocator.free(resolved_to);
 
-    var from_it = mem.tokenize(u8, resolved_from, "/");
-    var to_it = mem.tokenize(u8, resolved_to, "/");
+    var from_it = mem.tokenizeScalar(u8, resolved_from, '/');
+    var to_it = mem.tokenizeScalar(u8, resolved_to, '/');
     while (true) {
         const from_component = from_it.next() orelse return allocator.dupe(u8, to_it.rest());
         const to_rest = to_it.rest();
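
The path code keeps `tokenizeAny` wherever both '/' and '\' must count as separators, and switches to `tokenizeScalar` where only '/' can occur. Sketch (illustration only, not part of the patch):

```zig
const std = @import("std");

test "tokenizeAny splits Windows paths on either separator" {
    var it = std.mem.tokenizeAny(u8, "C:\\foo/bar\\baz", "/\\");
    try std.testing.expectEqualStrings("C:", it.next().?);
    try std.testing.expectEqualStrings("foo", it.next().?);
    try std.testing.expectEqualStrings("bar", it.next().?);
    try std.testing.expectEqualStrings("baz", it.next().?);
    try std.testing.expect(it.next() == null);
}
```
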
diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig
index 91b688a25c1e..6e1b2cb22626 100644
--- a/lib/std/http/Client.zig
+++ b/lib/std/http/Client.zig
@@ -331,7 +331,7 @@ pub const Response = struct {
     };
 
     pub fn parse(res: *Response, bytes: []const u8, trailing: bool) ParseError!void {
-        var it = mem.tokenize(u8, bytes[0 .. bytes.len - 4], "\r\n");
+        var it = mem.tokenizeAny(u8, bytes[0 .. bytes.len - 4], "\r\n");
 
         const first_line = it.next() orelse return error.HttpHeadersInvalid;
         if (first_line.len < 12)
@@ -357,7 +357,7 @@ pub const Response = struct {
                 else => {},
             }
 
-            var line_it = mem.tokenize(u8, line, ": ");
+            var line_it = mem.tokenizeAny(u8, line, ": ");
             const header_name = line_it.next() orelse return error.HttpHeadersInvalid;
             const header_value = line_it.rest();
 
@@ -371,7 +371,7 @@ pub const Response = struct {
             } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
                 // Transfer-Encoding: second, first
                 // Transfer-Encoding: deflate, chunked
-                var iter = mem.splitBackwards(u8, header_value, ",");
+                var iter = mem.splitBackwardsScalar(u8, header_value, ',');
 
                 if (iter.next()) |first| {
                     const trimmed = mem.trim(u8, first, " ");
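
`splitBackwards(u8, header_value, ",")` becomes `splitBackwardsScalar` with ','; the backwards iterator yields the last listed transfer coding first, which is the one this code needs to inspect. Sketch (illustration only, not part of the patch):

```zig
const std = @import("std");

test "splitBackwardsScalar yields the last comma-separated coding first" {
    var it = std.mem.splitBackwardsScalar(u8, "deflate, chunked", ',');
    try std.testing.expectEqualStrings("chunked", std.mem.trim(u8, it.first(), " "));
    try std.testing.expectEqualStrings("deflate", std.mem.trim(u8, it.next().?, " "));
    try std.testing.expect(it.next() == null);
}
```
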
diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig
index 67641eab004a..844a9bc8cfbe 100644
--- a/lib/std/http/Server.zig
+++ b/lib/std/http/Server.zig
@@ -178,7 +178,7 @@ pub const Request = struct {
     };
 
     pub fn parse(req: *Request, bytes: []const u8) ParseError!void {
-        var it = mem.tokenize(u8, bytes[0 .. bytes.len - 4], "\r\n");
+        var it = mem.tokenizeAny(u8, bytes[0 .. bytes.len - 4], "\r\n");
 
         const first_line = it.next() orelse return error.HttpHeadersInvalid;
         if (first_line.len < 10)
@@ -212,7 +212,7 @@ pub const Request = struct {
                 else => {},
             }
 
-            var line_it = mem.tokenize(u8, line, ": ");
+            var line_it = mem.tokenizeAny(u8, line, ": ");
             const header_name = line_it.next() orelse return error.HttpHeadersInvalid;
             const header_value = line_it.rest();
 
@@ -224,7 +224,7 @@ pub const Request = struct {
             } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
                 // Transfer-Encoding: second, first
                 // Transfer-Encoding: deflate, chunked
-                var iter = mem.splitBackwards(u8, header_value, ",");
+                var iter = mem.splitBackwardsScalar(u8, header_value, ',');
 
                 if (iter.next()) |first| {
                     const trimmed = mem.trim(u8, first, " ");
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 311c97c25491..212d09a1a839 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -1958,72 +1958,117 @@ test "byteSwapAllFields" {
     }, k);
 }
 
+/// Deprecated: use `tokenizeAny`, `tokenizeSequence`, or `tokenizeScalar`
+pub const tokenize = tokenizeAny;
+
 /// Returns an iterator that iterates over the slices of `buffer` that are not
-/// any of the bytes in `delimiter_bytes`.
+/// any of the items in `delimiters`.
 ///
-/// `tokenize(u8, "   abc def    ghi  ", " ")` will return slices
+/// `tokenizeAny(u8, "   abc|def ||  ghi  ", " |")` will return slices
 /// for "abc", "def", "ghi", null, in that order.
 ///
 /// If `buffer` is empty, the iterator will return null.
-/// If `delimiter_bytes` does not exist in buffer,
+/// If none of `delimiters` exist in buffer,
 /// the iterator will return `buffer`, null, in that order.
 ///
-/// See also: `split` and `splitBackwards`.
-pub fn tokenize(comptime T: type, buffer: []const T, delimiter_bytes: []const T) TokenIterator(T) {
+/// See also: `tokenizeSequence`, `tokenizeScalar`,
+///           `splitSequence`,`splitAny`, `splitScalar`,
+///           `splitBackwardsSequence`, `splitBackwardsAny`, and `splitBackwardsScalar`
+pub fn tokenizeAny(comptime T: type, buffer: []const T, delimiters: []const T) TokenIterator(T, .any) {
     return .{
         .index = 0,
         .buffer = buffer,
-        .delimiter_bytes = delimiter_bytes,
+        .delimiter = delimiters,
     };
 }
 
-test "tokenize" {
-    var it = tokenize(u8, "   abc def   ghi  ", " ");
+/// Returns an iterator that iterates over the slices of `buffer` that are not
+/// the sequence in `delimiter`.
+///
+/// `tokenizeSequence(u8, "<>abc><def<><>ghi", "<>")` will return slices
+/// for "abc><def", "ghi", null, in that order.
+///
+/// If `buffer` is empty, the iterator will return null.
+/// If `delimiter` does not exist in buffer,
+/// the iterator will return `buffer`, null, in that order.
+/// The delimiter length must not be zero.
+///
+/// See also: `tokenizeAny`, `tokenizeScalar`,
+///           `splitSequence`,`splitAny`, `splitScalar`,
+///           `splitBackwardsSequence`, `splitBackwardsAny`, and `splitBackwardsScalar`
+pub fn tokenizeSequence(comptime T: type, buffer: []const T, delimiter: []const T) TokenIterator(T, .sequence) {
+    assert(delimiter.len != 0);
+    return .{
+        .index = 0,
+        .buffer = buffer,
+        .delimiter = delimiter,
+    };
+}
+
+/// Returns an iterator that iterates over the slices of `buffer` that are not
+/// `delimiter`.
+///
+/// `tokenizeScalar(u8, "   abc def     ghi  ", ' ')` will return slices
+/// for "abc", "def", "ghi", null, in that order.
+///
+/// If `buffer` is empty, the iterator will return null.
+/// If `delimiter` does not exist in buffer,
+/// the iterator will return `buffer`, null, in that order.
+///
+/// See also: `tokenizeAny`, `tokenizeSequence`,
+///           `splitSequence`,`splitAny`, `splitScalar`,
+///           `splitBackwardsSequence`, `splitBackwardsAny`, and `splitBackwardsScalar`
+pub fn tokenizeScalar(comptime T: type, buffer: []const T, delimiter: T) TokenIterator(T, .scalar) {
+    return .{
+        .index = 0,
+        .buffer = buffer,
+        .delimiter = delimiter,
+    };
+}
+
+test "tokenizeSequence" {
+    var it = tokenizeSequence(u8, "a<>b<><>c><>d><", "<>");
+    try testing.expectEqualStrings("a", it.next().?);
+    try testing.expectEqualStrings("b", it.peek().?);
+    try testing.expectEqualStrings("b", it.next().?);
+    try testing.expectEqualStrings("c>", it.next().?);
+    try testing.expectEqualStrings("d><", it.next().?);
+    try testing.expect(it.next() == null);
+    try testing.expect(it.peek() == null);
+
+    var it16 = tokenizeSequence(
+        u16,
+        std.unicode.utf8ToUtf16LeStringLiteral("a<>b<><>c><>d><"),
+        std.unicode.utf8ToUtf16LeStringLiteral("<>"),
+    );
+    try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("a")));
+    try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("b")));
+    try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("c>")));
+    try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("d><")));
+    try testing.expect(it16.next() == null);
+}
+
 test "tokenize (reset)" {
-    var it = tokenize(u8, "   abc def   ghi  ", " ");
-    try testing.expect(eql(u8, it.next().?, "abc"));
-    try testing.expect(eql(u8, it.next().?, "def"));
-    try testing.expect(eql(u8, it.next().?, "ghi"));
+    {
+        var it = tokenizeAny(u8, "   abc def   ghi  ", " ");
+        try testing.expect(eql(u8, it.next().?, "abc"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "ghi"));
+
+        it.reset();
 
-    it.reset();
+        try testing.expect(eql(u8, it.next().?, "abc"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "ghi"));
+        try testing.expect(it.next() == null);
+    }
+    {
+        var it = tokenizeSequence(u8, "<><>abc<>def<><>ghi<>", "<>");
+        try testing.expect(eql(u8, it.next().?, "abc"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "ghi"));
 
-    try testing.expect(eql(u8, it.next().?, "abc"));
-    try testing.expect(eql(u8, it.next().?, "def"));
-    try testing.expect(eql(u8, it.next().?, "ghi"));
-    try testing.expect(it.next() == null);
+        it.reset();
+
+        try testing.expect(eql(u8, it.next().?, "abc"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "ghi"));
+        try testing.expect(it.next() == null);
+    }
+    {
+        var it = tokenizeScalar(u8, "   abc def   ghi  ", ' ');
+        try testing.expect(eql(u8, it.next().?, "abc"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "ghi"));
+
+        it.reset();
+
+        try testing.expect(eql(u8, it.next().?, "abc"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "ghi"));
+        try testing.expect(it.next() == null);
+    }
 }
 
+/// Deprecated: use `splitSequence`, `splitAny`, or `splitScalar`
+pub const split = splitSequence;
+
 /// Returns an iterator that iterates over the slices of `buffer` that
-/// are separated by bytes in `delimiter`.
+/// are separated by the byte sequence in `delimiter`.
 ///
-/// `split(u8, "abc|def||ghi", "|")` will return slices
+/// `splitSequence(u8, "abc||def||||ghi", "||")` will return slices
 /// for "abc", "def", "", "ghi", null, in that order.
 ///
 /// If `delimiter` does not exist in buffer,
 /// the iterator will return `buffer`, null, in that order.
 /// The delimiter length must not be zero.
 ///
-/// See also: `tokenize` and `splitBackwards`.
-pub fn split(comptime T: type, buffer: []const T, delimiter: []const T) SplitIterator(T) {
+/// See also: `splitAny`, `splitScalar`, `splitBackwardsSequence`,
+///           `splitBackwardsAny`,`splitBackwardsScalar`,
+///           `tokenizeAny`, `tokenizeSequence`, and `tokenizeScalar`.
+pub fn splitSequence(comptime T: type, buffer: []const T, delimiter: []const T) SplitIterator(T, .sequence) {
     assert(delimiter.len != 0);
     return .{
         .index = 0,
@@ -2080,8 +2184,48 @@ pub fn split(comptime T: type, buffer: []const T, delimiter: []const T) SplitIte
     };
 }
 
-test "split" {
-    var it = split(u8, "abc|def||ghi", "|");
+/// Returns an iterator that iterates over the slices of `buffer` that
+/// are separated by any item in `delimiters`.
+///
+/// `splitAny(u8, "abc,def||ghi", "|,")` will return slices
+/// for "abc", "def", "", "ghi", null, in that order.
+///
+/// If none of `delimiters` exist in buffer,
+/// the iterator will return `buffer`, null, in that order.
+///
+/// See also: `splitSequence`, `splitScalar`, `splitBackwardsSequence`,
+///           `splitBackwardsAny`,`splitBackwardsScalar`,
+///           `tokenizeAny`, `tokenizeSequence`, and `tokenizeScalar`.
+pub fn splitAny(comptime T: type, buffer: []const T, delimiters: []const T) SplitIterator(T, .any) {
+    return .{
+        .index = 0,
+        .buffer = buffer,
+        .delimiter = delimiters,
+    };
+}
+
+/// Returns an iterator that iterates over the slices of `buffer` that
+/// are separated by `delimiter`.
+///
+/// `splitScalar(u8, "abc|def||ghi", '|')` will return slices
+/// for "abc", "def", "", "ghi", null, in that order.
+///
+/// If `delimiter` does not exist in buffer,
+/// the iterator will return `buffer`, null, in that order.
+///
+/// See also: `splitSequence`, `splitAny`, `splitBackwardsSequence`,
+///           `splitBackwardsAny`,`splitBackwardsScalar`,
+///           `tokenizeAny`, `tokenizeSequence`, and `tokenizeScalar`.
+pub fn splitScalar(comptime T: type, buffer: []const T, delimiter: T) SplitIterator(T, .scalar) {
+    return .{
+        .index = 0,
+        .buffer = buffer,
+        .delimiter = delimiter,
+    };
+}
+
+test "splitScalar" {
+    var it = splitScalar(u8, "abc|def||ghi", '|');
     try testing.expectEqualSlices(u8, it.rest(), "abc|def||ghi");
     try testing.expectEqualSlices(u8, it.first(), "abc");
 
@@ -2097,30 +2241,30 @@ test "split" {
     try testing.expectEqualSlices(u8, it.rest(), "");
     try testing.expect(it.next() == null);
 
-    it = split(u8, "", "|");
+    it = splitScalar(u8, "", '|');
     try testing.expectEqualSlices(u8, it.first(), "");
     try testing.expect(it.next() == null);
 
-    it = split(u8, "|", "|");
+    it = splitScalar(u8, "|", '|');
     try testing.expectEqualSlices(u8, it.first(), "");
     try testing.expectEqualSlices(u8, it.next().?, "");
     try testing.expect(it.next() == null);
 
-    it = split(u8, "hello", " ");
+    it = splitScalar(u8, "hello", ' ');
     try testing.expectEqualSlices(u8, it.first(), "hello");
     try testing.expect(it.next() == null);
 
-    var it16 = split(
+    var it16 = splitScalar(
         u16,
         std.unicode.utf8ToUtf16LeStringLiteral("hello"),
-        std.unicode.utf8ToUtf16LeStringLiteral(" "),
+        ' ',
     );
     try testing.expectEqualSlices(u16, it16.first(), std.unicode.utf8ToUtf16LeStringLiteral("hello"));
     try testing.expect(it16.next() == null);
 }
 
-test "split (multibyte)" {
-    var it = split(u8, "a, b ,, c, d, e", ", ");
+test "splitSequence" {
+    var it = splitSequence(u8, "a, b ,, c, d, e", ", ");
     try testing.expectEqualSlices(u8, it.first(), "a");
     try testing.expectEqualSlices(u8, it.rest(), "b ,, c, d, e");
     try testing.expectEqualSlices(u8, it.next().?, "b ,");
@@ -2129,7 +2273,7 @@ test "split (multibyte)" {
     try testing.expectEqualSlices(u8, it.next().?, "e");
     try testing.expect(it.next() == null);
 
-    var it16 = split(
+    var it16 = splitSequence(
         u16,
         std.unicode.utf8ToUtf16LeStringLiteral("a, b ,, c, d, e"),
         std.unicode.utf8ToUtf16LeStringLiteral(", "),
@@ -2142,42 +2286,144 @@ test "split (multibyte)" {
     try testing.expect(it16.next() == null);
 }
 
+test "splitAny" {
+    var it = splitAny(u8, "a,b, c d e", ", ");
+    try testing.expectEqualSlices(u8, it.first(), "a");
+    try testing.expectEqualSlices(u8, it.rest(), "b, c d e");
+    try testing.expectEqualSlices(u8, it.next().?, "b");
+    try testing.expectEqualSlices(u8, it.next().?, "");
+    try testing.expectEqualSlices(u8, it.next().?, "c");
+    try testing.expectEqualSlices(u8, it.next().?, "d");
+    try testing.expectEqualSlices(u8, it.next().?, "e");
+    try testing.expect(it.next() == null);
+
+    it = splitAny(u8, "hello", "");
+    try testing.expect(eql(u8, it.next().?, "hello"));
+    try testing.expect(it.next() == null);
+
+    var it16 = splitAny(
+        u16,
+        std.unicode.utf8ToUtf16LeStringLiteral("a,b, c d e"),
+        std.unicode.utf8ToUtf16LeStringLiteral(", "),
+    );
+    try testing.expectEqualSlices(u16, it16.first(), std.unicode.utf8ToUtf16LeStringLiteral("a"));
+    try testing.expectEqualSlices(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("b"));
+    try testing.expectEqualSlices(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral(""));
+    try testing.expectEqualSlices(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("c"));
+    try testing.expectEqualSlices(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("d"));
+    try testing.expectEqualSlices(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("e"));
+    try testing.expect(it16.next() == null);
+}
+
 test "split (reset)" {
-    var it = split(u8, "abc def ghi", " ");
-    try testing.expect(eql(u8, it.first(), "abc"));
-    try testing.expect(eql(u8, it.next().?, "def"));
-    try testing.expect(eql(u8, it.next().?, "ghi"));
+    {
+        var it = splitSequence(u8, "abc def ghi", " ");
+        try testing.expect(eql(u8, it.first(), "abc"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "ghi"));
 
-    it.reset();
+        it.reset();
 
-    try testing.expect(eql(u8, it.first(), "abc"));
-    try testing.expect(eql(u8, it.next().?, "def"));
-    try testing.expect(eql(u8, it.next().?, "ghi"));
-    try testing.expect(it.next() == null);
+        try testing.expect(eql(u8, it.first(), "abc"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "ghi"));
+        try testing.expect(it.next() == null);
+    }
+    {
+        var it = splitAny(u8, "abc def,ghi", " ,");
+        try testing.expect(eql(u8, it.first(), "abc"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "ghi"));
+
+        it.reset();
+
+        try testing.expect(eql(u8, it.first(), "abc"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "ghi"));
+        try testing.expect(it.next() == null);
+    }
+    {
+        var it = splitScalar(u8, "abc def ghi", ' ');
+        try testing.expect(eql(u8, it.first(), "abc"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "ghi"));
+
+        it.reset();
+
+        try testing.expect(eql(u8, it.first(), "abc"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "ghi"));
+        try testing.expect(it.next() == null);
+    }
 }
 
-/// Returns an iterator that iterates backwards over the slices of `buffer`
-/// that are separated by bytes in `delimiter`.
+/// Deprecated: use `splitBackwardsSequence`, `splitBackwardsAny`, or `splitBackwardsScalar`
+pub const splitBackwards = splitBackwardsSequence;
+
+/// Returns an iterator that iterates backwards over the slices of `buffer` that
+/// are separated by the sequence in `delimiter`.
 ///
-/// `splitBackwards(u8, "abc|def||ghi", "|")` will return slices
+/// `splitBackwardsSequence(u8, "abc||def||||ghi", "||")` will return slices
 /// for "ghi", "", "def", "abc", null, in that order.
 ///
 /// If `delimiter` does not exist in buffer,
 /// the iterator will return `buffer`, null, in that order.
 /// The delimiter length must not be zero.
 ///
-/// See also: `tokenize` and `split`.
-pub fn splitBackwards(comptime T: type, buffer: []const T, delimiter: []const T) SplitBackwardsIterator(T) {
+/// See also: `splitBackwardsAny`, `splitBackwardsScalar`,
+///           `splitSequence`, `splitAny`,`splitScalar`,
+///           `tokenizeAny`, `tokenizeSequence`, and `tokenizeScalar`.
+pub fn splitBackwardsSequence(comptime T: type, buffer: []const T, delimiter: []const T) SplitBackwardsIterator(T, .sequence) {
     assert(delimiter.len != 0);
-    return SplitBackwardsIterator(T){
+    return .{
+        .index = buffer.len,
+        .buffer = buffer,
+        .delimiter = delimiter,
+    };
+}
+
+/// Returns an iterator that iterates backwards over the slices of `buffer` that
+/// are separated by any item in `delimiters`.
+///
+/// `splitBackwardsAny(u8, "abc,def||ghi", "|,")` will return slices
+/// for "ghi", "", "def", "abc", null, in that order.
+///
+/// If none of `delimiters` exist in buffer,
+/// the iterator will return `buffer`, null, in that order.
+///
+/// See also: `splitBackwardsSequence`, `splitBackwardsScalar`,
+///           `splitSequence`, `splitAny`,`splitScalar`,
+///           `tokenizeAny`, `tokenizeSequence`, and `tokenizeScalar`.
+pub fn splitBackwardsAny(comptime T: type, buffer: []const T, delimiters: []const T) SplitBackwardsIterator(T, .any) {
+    return .{
+        .index = buffer.len,
+        .buffer = buffer,
+        .delimiter = delimiters,
+    };
+}
+
+/// Returns an iterator that iterates backwards over the slices of `buffer` that
+/// are separated by `delimiter`.
+///
+/// `splitBackwardsScalar(u8, "abc|def||ghi", '|')` will return slices
+/// for "ghi", "", "def", "abc", null, in that order.
+///
+/// If `delimiter` does not exist in buffer,
+/// the iterator will return `buffer`, null, in that order.
+///
+/// See also: `splitBackwardsSequence`, `splitBackwardsAny`,
+///           `splitSequence`, `splitAny`,`splitScalar`,
+///           `tokenizeAny`, `tokenizeSequence`, and `tokenizeScalar`.
+pub fn splitBackwardsScalar(comptime T: type, buffer: []const T, delimiter: T) SplitBackwardsIterator(T, .scalar) {
+    return .{
         .index = buffer.len,
         .buffer = buffer,
         .delimiter = delimiter,
     };
 }
 
-test "splitBackwards" {
-    var it = splitBackwards(u8, "abc|def||ghi", "|");
+test "splitBackwardsScalar" {
+    var it = splitBackwardsScalar(u8, "abc|def||ghi", '|');
     try testing.expectEqualSlices(u8, it.rest(), "abc|def||ghi");
     try testing.expectEqualSlices(u8, it.first(), "ghi");
 
@@ -2193,30 +2439,30 @@ test "splitBackwards" {
     try testing.expectEqualSlices(u8, it.rest(), "");
     try testing.expect(it.next() == null);
 
-    it = splitBackwards(u8, "", "|");
+    it = splitBackwardsScalar(u8, "", '|');
     try testing.expectEqualSlices(u8, it.first(), "");
     try testing.expect(it.next() == null);
 
-    it = splitBackwards(u8, "|", "|");
+    it = splitBackwardsScalar(u8, "|", '|');
     try testing.expectEqualSlices(u8, it.first(), "");
     try testing.expectEqualSlices(u8, it.next().?, "");
     try testing.expect(it.next() == null);
 
-    it = splitBackwards(u8, "hello", " ");
+    it = splitBackwardsScalar(u8, "hello", ' ');
     try testing.expectEqualSlices(u8, it.first(), "hello");
     try testing.expect(it.next() == null);
 
-    var it16 = splitBackwards(
+    var it16 = splitBackwardsScalar(
         u16,
         std.unicode.utf8ToUtf16LeStringLiteral("hello"),
-        std.unicode.utf8ToUtf16LeStringLiteral(" "),
+        ' ',
     );
     try testing.expectEqualSlices(u16, it16.first(), std.unicode.utf8ToUtf16LeStringLiteral("hello"));
     try testing.expect(it16.next() == null);
 }
 
-test "splitBackwards (multibyte)" {
-    var it = splitBackwards(u8, "a, b ,, c, d, e", ", ");
+test "splitBackwardsSequence" {
+    var it = splitBackwardsSequence(u8, "a, b ,, c, d, e", ", ");
     try testing.expectEqualSlices(u8, it.rest(), "a, b ,, c, d, e");
     try testing.expectEqualSlices(u8, it.first(), "e");
 
@@ -2235,7 +2481,7 @@ test "splitBackwards (multibyte)" {
     try testing.expectEqualSlices(u8, it.rest(), "");
     try testing.expect(it.next() == null);
 
-    var it16 = splitBackwards(
+    var it16 = splitBackwardsSequence(
         u16,
         std.unicode.utf8ToUtf16LeStringLiteral("a, b ,, c, d, e"),
         std.unicode.utf8ToUtf16LeStringLiteral(", "),
@@ -2248,18 +2494,83 @@ test "splitBackwards (multibyte)" {
     try testing.expect(it16.next() == null);
 }
 
-test "splitBackwards (reset)" {
-    var it = splitBackwards(u8, "abc def ghi", " ");
-    try testing.expect(eql(u8, it.first(), "ghi"));
-    try testing.expect(eql(u8, it.next().?, "def"));
-    try testing.expect(eql(u8, it.next().?, "abc"));
+test "splitBackwardsAny" {
+    var it = splitBackwardsAny(u8, "a,b, c d e", ", ");
+    try testing.expectEqualSlices(u8, it.rest(), "a,b, c d e");
+    try testing.expectEqualSlices(u8, it.first(), "e");
 
-    it.reset();
+    try testing.expectEqualSlices(u8, it.rest(), "a,b, c d");
+    try testing.expectEqualSlices(u8, it.next().?, "d");
 
-    try testing.expect(eql(u8, it.first(), "ghi"));
-    try testing.expect(eql(u8, it.next().?, "def"));
-    try testing.expect(eql(u8, it.next().?, "abc"));
+    try testing.expectEqualSlices(u8, it.rest(), "a,b, c");
+    try testing.expectEqualSlices(u8, it.next().?, "c");
+
+    try testing.expectEqualSlices(u8, it.rest(), "a,b,");
+    try testing.expectEqualSlices(u8, it.next().?, "");
+
+    try testing.expectEqualSlices(u8, it.rest(), "a,b");
+    try testing.expectEqualSlices(u8, it.next().?, "b");
+
+    try testing.expectEqualSlices(u8, it.rest(), "a");
+    try testing.expectEqualSlices(u8, it.next().?, "a");
+
+    try testing.expectEqualSlices(u8, it.rest(), "");
     try testing.expect(it.next() == null);
+
+    var it16 = splitBackwardsAny(
+        u16,
+        std.unicode.utf8ToUtf16LeStringLiteral("a,b, c d e"),
+        std.unicode.utf8ToUtf16LeStringLiteral(", "),
+    );
+    try testing.expectEqualSlices(u16, it16.first(), std.unicode.utf8ToUtf16LeStringLiteral("e"));
+    try testing.expectEqualSlices(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("d"));
+    try testing.expectEqualSlices(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("c"));
+    try testing.expectEqualSlices(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral(""));
+    try testing.expectEqualSlices(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("b"));
+    try testing.expectEqualSlices(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("a"));
+    try testing.expect(it16.next() == null);
+}
+
+test "splitBackwards (reset)" {
+    {
+        var it = splitBackwardsSequence(u8, "abc def ghi", " ");
+        try testing.expect(eql(u8, it.first(), "ghi"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "abc"));
+
+        it.reset();
+
+        try testing.expect(eql(u8, it.first(), "ghi"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "abc"));
+        try testing.expect(it.next() == null);
+    }
+    {
+        var it = splitBackwardsAny(u8, "abc def,ghi", " ,");
+        try testing.expect(eql(u8, it.first(), "ghi"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "abc"));
+
+        it.reset();
+
+        try testing.expect(eql(u8, it.first(), "ghi"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "abc"));
+        try testing.expect(it.next() == null);
+    }
+    {
+        var it = splitBackwardsScalar(u8, "abc def ghi", ' ');
+        try testing.expect(eql(u8, it.first(), "ghi"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "abc"));
+
+        it.reset();
+
+        try testing.expect(eql(u8, it.first(), "ghi"));
+        try testing.expect(eql(u8, it.next().?, "def"));
+        try testing.expect(eql(u8, it.next().?, "abc"));
+        try testing.expect(it.next() == null);
+    }
 }
 
 /// Returns an iterator with a sliding window of slices for `buffer`.
@@ -2430,10 +2741,15 @@ test "endsWith" {
     try testing.expect(!endsWith(u8, "Bob", "Bo"));
 }
 
-pub fn TokenIterator(comptime T: type) type {
+pub const DelimiterType = enum { sequence, any, scalar };
+
+pub fn TokenIterator(comptime T: type, comptime delimiter_type: DelimiterType) type {
     return struct {
         buffer: []const T,
-        delimiter_bytes: []const T,
+        delimiter: switch (delimiter_type) {
+            .sequence, .any => []const T,
+            .scalar => T,
+        },
         index: usize,
 
         const Self = @This();
@@ -2450,7 +2766,10 @@ pub fn TokenIterator(comptime T: type) type {
         /// complete. Does not advance to the next token.
         pub fn peek(self: *Self) ?[]const T {
             // move to beginning of token
-            while (self.index < self.buffer.len and self.isSplitByte(self.buffer[self.index])) : (self.index += 1) {}
+            while (self.index < self.buffer.len and self.isDelimiter(self.index)) : (self.index += switch (delimiter_type) {
+                .sequence => self.delimiter.len,
+                .any, .scalar => 1,
+            }) {}
             const start = self.index;
             if (start == self.buffer.len) {
                 return null;
@@ -2458,7 +2777,7 @@ pub fn TokenIterator(comptime T: type) type {
 
             // move to end of token
             var end = start;
-            while (end < self.buffer.len and !self.isSplitByte(self.buffer[end])) : (end += 1) {}
+            while (end < self.buffer.len and !self.isDelimiter(end)) : (end += 1) {}
 
             return self.buffer[start..end];
         }
@@ -2467,7 +2786,10 @@ pub fn TokenIterator(comptime T: type) type {
         pub fn rest(self: Self) []const T {
             // move to beginning of token
             var index: usize = self.index;
-            while (index < self.buffer.len and self.isSplitByte(self.buffer[index])) : (index += 1) {}
+            while (index < self.buffer.len and self.isDelimiter(index)) : (index += switch (delimiter_type) {
+                .sequence => self.delimiter.len,
+                .any, .scalar => 1,
+            }) {}
             return self.buffer[index..];
         }
 
@@ -2476,22 +2798,32 @@ pub fn TokenIterator(comptime T: type) type {
             self.index = 0;
         }
 
-        fn isSplitByte(self: Self, byte: T) bool {
-            for (self.delimiter_bytes) |delimiter_byte| {
-                if (byte == delimiter_byte) {
-                    return true;
-                }
+        fn isDelimiter(self: Self, index: usize) bool {
+            switch (delimiter_type) {
+                .sequence => return startsWith(T, self.buffer[index..], self.delimiter),
+                .any => {
+                    const item = self.buffer[index];
+                    for (self.delimiter) |delimiter_item| {
+                        if (item == delimiter_item) {
+                            return true;
+                        }
+                    }
+                    return false;
+                },
+                .scalar => return self.buffer[index] == self.delimiter,
             }
-            return false;
         }
     };
 }
 
-pub fn SplitIterator(comptime T: type) type {
+pub fn SplitIterator(comptime T: type, comptime delimiter_type: DelimiterType) type {
     return struct {
         buffer: []const T,
         index: ?usize,
-        delimiter: []const T,
+        delimiter: switch (delimiter_type) {
+            .sequence, .any => []const T,
+            .scalar => T,
+        },
 
         const Self = @This();
 
@@ -2505,8 +2837,15 @@ pub fn SplitIterator(comptime T: type) type {
         /// Returns a slice of the next field, or null if splitting is complete.
         pub fn next(self: *Self) ?[]const T {
             const start = self.index orelse return null;
-            const end = if (indexOfPos(T, self.buffer, start, self.delimiter)) |delim_start| blk: {
-                self.index = delim_start + self.delimiter.len;
+            const end = if (switch (delimiter_type) {
+                .sequence => indexOfPos(T, self.buffer, start, self.delimiter),
+                .any => indexOfAnyPos(T, self.buffer, start, self.delimiter),
+                .scalar => indexOfScalarPos(T, self.buffer, start, self.delimiter),
+            }) |delim_start| blk: {
+                self.index = delim_start + switch (delimiter_type) {
+                    .sequence => self.delimiter.len,
+                    .any, .scalar => 1,
+                };
                 break :blk delim_start;
             } else blk: {
                 self.index = null;
@@ -2529,11 +2868,14 @@ pub fn SplitIterator(comptime T: type) type {
     };
 }
 
-pub fn SplitBackwardsIterator(comptime T: type) type {
+pub fn SplitBackwardsIterator(comptime T: type, comptime delimiter_type: DelimiterType) type {
     return struct {
         buffer: []const T,
         index: ?usize,
-        delimiter: []const T,
+        delimiter: switch (delimiter_type) {
+            .sequence, .any => []const T,
+            .scalar => T,
+        },
 
         const Self = @This();
 
@@ -2547,9 +2889,16 @@ pub fn SplitBackwardsIterator(comptime T: type) type {
         /// Returns a slice of the next field, or null if splitting is complete.
         pub fn next(self: *Self) ?[]const T {
             const end = self.index orelse return null;
-            const start = if (lastIndexOf(T, self.buffer[0..end], self.delimiter)) |delim_start| blk: {
+            const start = if (switch (delimiter_type) {
+                .sequence => lastIndexOf(T, self.buffer[0..end], self.delimiter),
+                .any => lastIndexOfAny(T, self.buffer[0..end], self.delimiter),
+                .scalar => lastIndexOfScalar(T, self.buffer[0..end], self.delimiter),
+            }) |delim_start| blk: {
                 self.index = delim_start;
-                break :blk delim_start + self.delimiter.len;
+                break :blk delim_start + switch (delimiter_type) {
+                    .sequence => self.delimiter.len,
+                    .any, .scalar => 1,
+                };
             } else blk: {
                 self.index = null;
                 break :blk 0;
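
Taken together, the three delimiter flavors differ only in how the delimiter argument is interpreted: a contiguous sequence, a set of single items, or one scalar of the element type. A side-by-side sketch (illustration only, not part of the patch):

```zig
const std = @import("std");

test "sequence vs any vs scalar delimiters" {
    // Sequence: the whole "<>" string is one delimiter.
    var seq_it = std.mem.tokenizeSequence(u8, "a<>b<><>c", "<>");
    try std.testing.expectEqualStrings("a", seq_it.next().?);
    try std.testing.expectEqualStrings("b", seq_it.next().?);
    try std.testing.expectEqualStrings("c", seq_it.next().?);
    try std.testing.expect(seq_it.next() == null);

    // Any: '<' and '>' are each a delimiter on their own.
    var any_it = std.mem.tokenizeAny(u8, "a<b>c", "<>");
    try std.testing.expectEqualStrings("a", any_it.next().?);
    try std.testing.expectEqualStrings("b", any_it.next().?);
    try std.testing.expectEqualStrings("c", any_it.next().?);
    try std.testing.expect(any_it.next() == null);

    // Scalar: a single item of the element type, no slice involved.
    var scalar_it = std.mem.tokenizeScalar(u8, "a,b,,c", ',');
    try std.testing.expectEqualStrings("a", scalar_it.next().?);
    try std.testing.expectEqualStrings("b", scalar_it.next().?);
    try std.testing.expectEqualStrings("c", scalar_it.next().?);
    try std.testing.expect(scalar_it.next() == null);
}
```
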
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 7629ecc8f793..64b13ec54472 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -1263,10 +1263,10 @@ fn linuxLookupNameFromHosts(
         },
         else => |e| return e,
     }) |line| {
-        var split_it = mem.split(u8, line, "#");
+        var split_it = mem.splitScalar(u8, line, '#');
         const no_comment_line = split_it.first();
 
-        var line_it = mem.tokenize(u8, no_comment_line, " \t");
+        var line_it = mem.tokenizeAny(u8, no_comment_line, " \t");
         const ip_text = line_it.next() orelse continue;
         var first_name_text: ?[]const u8 = null;
         while (line_it.next()) |name_text| {
@@ -1346,7 +1346,7 @@ fn linuxLookupNameFromDnsSearch(
     @memcpy(canon.items, canon_name);
     try canon.append('.');
 
-    var tok_it = mem.tokenize(u8, search, " \t");
+    var tok_it = mem.tokenizeAny(u8, search, " \t");
     while (tok_it.next()) |tok| {
         canon.shrinkRetainingCapacity(canon_name.len + 1);
         try canon.appendSlice(tok);
@@ -1465,15 +1465,15 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
         else => |e| return e,
     }) |line| {
         const no_comment_line = no_comment_line: {
-            var split = mem.split(u8, line, "#");
+            var split = mem.splitScalar(u8, line, '#');
             break :no_comment_line split.first();
         };
-        var line_it = mem.tokenize(u8, no_comment_line, " \t");
+        var line_it = mem.tokenizeAny(u8, no_comment_line, " \t");
 
         const token = line_it.next() orelse continue;
         if (mem.eql(u8, token, "options")) {
             while (line_it.next()) |sub_tok| {
-                var colon_it = mem.split(u8, sub_tok, ":");
+                var colon_it = mem.splitScalar(u8, sub_tok, ':');
                 const name = colon_it.first();
                 const value_txt = colon_it.next() orelse continue;
                 const value = std.fmt.parseInt(u8, value_txt, 10) catch |err| switch (err) {
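
The /etc/hosts and resolv.conf parsers keep the same two-step pattern: strip a '#' comment with a split, then tokenize the remainder on blanks. Sketch (illustration only, not part of the patch):

```zig
const std = @import("std");

test "strip a '#' comment, then tokenize on spaces and tabs" {
    const line = "127.0.0.1\tlocalhost  # loopback";
    var split_it = std.mem.splitScalar(u8, line, '#');
    const no_comment = split_it.first();
    var tok_it = std.mem.tokenizeAny(u8, no_comment, " \t");
    try std.testing.expectEqualStrings("127.0.0.1", tok_it.next().?);
    try std.testing.expectEqualStrings("localhost", tok_it.next().?);
    try std.testing.expect(tok_it.next() == null);
}
```
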
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 7dbd454b872e..1fe155c8ff34 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -1878,7 +1878,7 @@ pub fn execvpeZ_expandArg0(
     // Use of MAX_PATH_BYTES here is valid as the path_buf will be passed
     // directly to the operating system in execveZ.
     var path_buf: [MAX_PATH_BYTES]u8 = undefined;
-    var it = mem.tokenize(u8, PATH, ":");
+    var it = mem.tokenizeScalar(u8, PATH, ':');
     var seen_eacces = false;
     var err: ExecveError = error.FileNotFound;
 
diff --git a/lib/std/process.zig b/lib/std/process.zig
index 504f9075eb47..80be7051876f 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -310,7 +310,7 @@ pub fn getEnvMap(allocator: Allocator) !EnvMap {
 
         for (environ) |env| {
             const pair = mem.sliceTo(env, 0);
-            var parts = mem.split(u8, pair, "=");
+            var parts = mem.splitScalar(u8, pair, '=');
             const key = parts.first();
             const value = parts.rest();
             try result.put(key, value);
@@ -1200,7 +1200,7 @@ fn totalSystemMemoryLinux() !usize {
     var buf: [50]u8 = undefined;
     const amt = try file.read(&buf);
     if (amt != 50) return error.Unexpected;
-    var it = std.mem.tokenize(u8, buf[0..amt], " \n");
+    var it = std.mem.tokenizeAny(u8, buf[0..amt], " \n");
     const label = it.next().?;
     if (!std.mem.eql(u8, label, "MemTotal:")) return error.Unexpected;
     const int_text = it.next() orelse return error.Unexpected;
diff --git a/lib/std/zig/CrossTarget.zig b/lib/std/zig/CrossTarget.zig
index 93b6d97d7540..6432c733c6d7 100644
--- a/lib/std/zig/CrossTarget.zig
+++ b/lib/std/zig/CrossTarget.zig
@@ -239,7 +239,7 @@ pub fn parse(args: ParseOptions) !CrossTarget {
         .dynamic_linker = DynamicLinker.init(args.dynamic_linker),
     };
 
-    var it = mem.split(u8, args.arch_os_abi, "-");
+    var it = mem.splitScalar(u8, args.arch_os_abi, '-');
     const arch_name = it.first();
     const arch_is_native = mem.eql(u8, arch_name, "native");
     if (!arch_is_native) {
@@ -257,7 +257,7 @@ pub fn parse(args: ParseOptions) !CrossTarget {
 
     const opt_abi_text = it.next();
     if (opt_abi_text) |abi_text| {
-        var abi_it = mem.split(u8, abi_text, ".");
+        var abi_it = mem.splitScalar(u8, abi_text, '.');
         const abi = std.meta.stringToEnum(Target.Abi, abi_it.first()) orelse
             return error.UnknownApplicationBinaryInterface;
         result.abi = abi;
@@ -343,7 +343,7 @@ pub fn parse(args: ParseOptions) !CrossTarget {
 /// This is intended to be used if the API user of CrossTarget needs to learn the
 /// target CPU architecture in order to fully populate `ParseOptions`.
 pub fn parseCpuArch(args: ParseOptions) ?Target.Cpu.Arch {
-    var it = mem.split(u8, args.arch_os_abi, "-");
+    var it = mem.splitScalar(u8, args.arch_os_abi, '-');
     const arch_name = it.first();
     const arch_is_native = mem.eql(u8, arch_name, "native");
     if (arch_is_native) {
@@ -645,7 +645,7 @@ pub fn updateCpuFeatures(self: CrossTarget, set: *Target.Cpu.Feature.Set) void {
 }
 
 fn parseOs(result: *CrossTarget, diags: *ParseOptions.Diagnostics, text: []const u8) !void {
-    var it = mem.split(u8, text, ".");
+    var it = mem.splitScalar(u8, text, '.');
     const os_name = it.first();
     diags.os_name = os_name;
     const os_is_native = mem.eql(u8, os_name, "native");
@@ -706,7 +706,7 @@ fn parseOs(result: *CrossTarget, diags: *ParseOptions.Diagnostics, text: []const
         .linux,
         .dragonfly,
         => {
-            var range_it = mem.split(u8, version_text, "...");
+            var range_it = mem.splitSequence(u8, version_text, "...");
 
             const min_text = range_it.next().?;
             const min_ver = SemVer.parse(min_text) catch |err| switch (err) {
@@ -726,7 +726,7 @@ fn parseOs(result: *CrossTarget, diags: *ParseOptions.Diagnostics, text: []const
         },
 
         .windows => {
-            var range_it = mem.split(u8, version_text, "...");
+            var range_it = mem.splitSequence(u8, version_text, "...");
 
             const min_text = range_it.first();
             const min_ver = std.meta.stringToEnum(Target.Os.WindowsVersion, min_text) orelse
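
These CrossTarget hunks show the split in the old API's behavior: splitScalar takes a single u8 delimiter (the '-' of a target triple), while splitSequence keeps the match-the-whole-slice behavior needed for the "..." version-range separator. A minimal sketch with invented inputs:

    const std = @import("std");
    const mem = std.mem;

    test "scalar delimiter vs multi-byte delimiter sequence" {
        var triple_it = mem.splitScalar(u8, "x86_64-linux-gnu", '-');
        try std.testing.expectEqualStrings("x86_64", triple_it.first());
        try std.testing.expectEqualStrings("linux", triple_it.next().?);
        try std.testing.expectEqualStrings("gnu", triple_it.next().?);

        var range_it = mem.splitSequence(u8, "5.10.0...6.3.0", "...");
        try std.testing.expectEqualStrings("5.10.0", range_it.next().?);
        try std.testing.expectEqualStrings("6.3.0", range_it.next().?);
        try std.testing.expect(range_it.next() == null);
    }
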
diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig
index 46b57998076b..72d49b4f36af 100644
--- a/lib/std/zig/ErrorBundle.zig
+++ b/lib/std/zig/ErrorBundle.zig
@@ -294,7 +294,7 @@ fn renderErrorMessageToWriter(
 ///
 /// This is used to split the message in `@compileError("hello\nworld")` for example.
 fn writeMsg(eb: ErrorBundle, err_msg: ErrorMessage, stderr: anytype, indent: usize) !void {
-    var lines = std.mem.split(u8, eb.nullTerminatedString(err_msg.msg), "\n");
+    var lines = std.mem.splitScalar(u8, eb.nullTerminatedString(err_msg.msg), '\n');
     while (lines.next()) |line| {
         try stderr.writeAll(line);
         if (lines.index == null) break;
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index e1ccc8e0e86f..83fa68567f15 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -1995,7 +1995,7 @@ fn renderArrayInit(
             if (!expr_newlines[i]) {
                 try ais.writer().writeAll(expr_text);
             } else {
-                var by_line = std.mem.split(u8, expr_text, "\n");
+                var by_line = std.mem.splitScalar(u8, expr_text, '\n');
                 var last_line_was_empty = false;
                 try ais.writer().writeAll(by_line.first());
                 while (by_line.next()) |line| {
diff --git a/lib/std/zig/system/NativePaths.zig b/lib/std/zig/system/NativePaths.zig
index 6f001c2e4f3c..e0df225c88ea 100644
--- a/lib/std/zig/system/NativePaths.zig
+++ b/lib/std/zig/system/NativePaths.zig
@@ -31,7 +31,7 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths
         defer allocator.free(nix_cflags_compile);
 
         is_nix = true;
-        var it = mem.tokenize(u8, nix_cflags_compile, " ");
+        var it = mem.tokenizeScalar(u8, nix_cflags_compile, ' ');
         while (true) {
             const word = it.next() orelse break;
             if (mem.eql(u8, word, "-isystem")) {
@@ -62,7 +62,7 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths
         defer allocator.free(nix_ldflags);
 
         is_nix = true;
-        var it = mem.tokenize(u8, nix_ldflags, " ");
+        var it = mem.tokenizeScalar(u8, nix_ldflags, ' ');
         while (true) {
             const word = it.next() orelse break;
             if (mem.eql(u8, word, "-rpath")) {
@@ -147,21 +147,21 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths
         // We use os.getenv here since this part won't be executed on
         // windows, to get rid of unnecessary error handling.
         if (std.os.getenv("C_INCLUDE_PATH")) |c_include_path| {
-            var it = mem.tokenize(u8, c_include_path, ":");
+            var it = mem.tokenizeScalar(u8, c_include_path, ':');
             while (it.next()) |dir| {
                 try self.addIncludeDir(dir);
             }
         }
 
         if (std.os.getenv("CPLUS_INCLUDE_PATH")) |cplus_include_path| {
-            var it = mem.tokenize(u8, cplus_include_path, ":");
+            var it = mem.tokenizeScalar(u8, cplus_include_path, ':');
             while (it.next()) |dir| {
                 try self.addIncludeDir(dir);
             }
         }
 
         if (std.os.getenv("LIBRARY_PATH")) |library_path| {
-            var it = mem.tokenize(u8, library_path, ":");
+            var it = mem.tokenizeScalar(u8, library_path, ':');
             while (it.next()) |dir| {
                 try self.addLibDir(dir);
             }
diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig
index b22580b6d833..f17356fdcd90 100644
--- a/lib/std/zig/system/NativeTargetInfo.zig
+++ b/lib/std/zig/system/NativeTargetInfo.zig
@@ -354,7 +354,7 @@ fn detectAbiAndDynamicLinker(
             const newline = mem.indexOfScalar(u8, buffer[0..len], '\n') orelse break :blk file;
             const line = buffer[0..newline];
             if (!mem.startsWith(u8, line, "#!")) break :blk file;
-            var it = mem.tokenize(u8, line[2..], " ");
+            var it = mem.tokenizeScalar(u8, line[2..], ' ');
             file_name = it.next() orelse return defaultAbiAndDynamicLinker(cpu, os, cross_target);
             file.close();
         }
@@ -556,7 +556,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.builtin.Version {
     const dynstr_size = @intCast(usize, dynstr.size);
     const dynstr_bytes = buf[0..dynstr_size];
     _ = try preadMin(file, dynstr_bytes, dynstr.offset, dynstr_bytes.len);
-    var it = mem.split(u8, dynstr_bytes, &.{0});
+    var it = mem.splitScalar(u8, dynstr_bytes, 0);
     var max_ver: std.builtin.Version = .{ .major = 2, .minor = 2, .patch = 5 };
     while (it.next()) |s| {
         if (mem.startsWith(u8, s, "GLIBC_2.")) {
@@ -811,7 +811,7 @@ pub fn abiAndDynamicLinkerFromFile(
                 const strtab = strtab_buf[0..strtab_read_len];
 
                 const rpath_list = mem.sliceTo(strtab, 0);
-                var it = mem.tokenize(u8, rpath_list, ":");
+                var it = mem.tokenizeScalar(u8, rpath_list, ':');
                 while (it.next()) |rpath| {
                     if (glibcVerFromRPath(rpath)) |ver| {
                         result.target.os.version_range.linux.glibc = ver;
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index afb5bc6704be..879f0a6b1540 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -4950,7 +4950,7 @@ fn findGuidePaths(self: *Autodoc, file: *File, str: []const u8) ![]const u8 {
 
     // TODO: this algo is kinda inefficient
 
-    var it = std.mem.split(u8, str, "\n");
+    var it = std.mem.splitScalar(u8, str, '\n');
     while (it.next()) |line| {
         const trimmed_line = std.mem.trim(u8, line, " ");
         if (std.mem.startsWith(u8, trimmed_line, guide_prefix)) {
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 30d825b9c6e7..cbdc789d40c9 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -4671,7 +4671,7 @@ pub fn hasSharedLibraryExt(filename: []const u8) bool {
         return true;
     }
     // Look for .so.X, .so.X.Y, .so.X.Y.Z
-    var it = mem.split(u8, filename, ".");
+    var it = mem.splitScalar(u8, filename, '.');
     _ = it.first();
     var so_txt = it.next() orelse return false;
     while (!mem.eql(u8, so_txt, "so")) {
@@ -5051,14 +5051,14 @@ fn parseLldStderr(comp: *Compilation, comptime prefix: []const u8, stderr: []con
     defer context_lines.deinit();
 
     var current_err: ?*LldError = null;
-    var lines = mem.split(u8, stderr, std.cstr.line_sep);
+    var lines = mem.splitSequence(u8, stderr, std.cstr.line_sep);
     while (lines.next()) |line| {
         if (mem.startsWith(u8, line, prefix ++ ":")) {
             if (current_err) |err| {
                 err.context_lines = try context_lines.toOwnedSlice();
             }
 
-            var split = std.mem.split(u8, line, "error: ");
+            var split = std.mem.splitSequence(u8, line, "error: ");
             _ = split.first();
 
             const duped_msg = try std.fmt.allocPrint(comp.gpa, "{s}: {s}", .{ prefix, split.rest() });
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 59c6551de14e..b614200e4187 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -9232,9 +9232,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
     }
 
     const asm_source = mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
-    var line_it = mem.tokenize(u8, asm_source, "\n\r;");
+    var line_it = mem.tokenizeAny(u8, asm_source, "\n\r;");
     while (line_it.next()) |line| {
-        var mnem_it = mem.tokenize(u8, line, " \t");
+        var mnem_it = mem.tokenizeAny(u8, line, " \t");
         const mnem_str = mnem_it.next() orelse continue;
         if (mem.startsWith(u8, mnem_str, "#")) continue;
 
@@ -9258,7 +9258,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
                 return self.fail("Invalid mnemonic: '{s}'", .{mnem_str});
         } };
 
-        var op_it = mem.tokenize(u8, mnem_it.rest(), ",");
+        var op_it = mem.tokenizeScalar(u8, mnem_it.rest(), ',');
         var ops = [1]encoder.Instruction.Operand{.none} ** 4;
         for (&ops) |*op| {
             const op_str = mem.trim(u8, op_it.next() orelse break, " \t");
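
A minimal sketch of the nested tokenization the airAsm hunks migrate to: tokenizeAny over a delimiter set for statements and surrounding whitespace, tokenizeScalar for the single ',' between operands. The assembly text is invented for illustration:

    const std = @import("std");
    const mem = std.mem;

    test "tokenize asm-like text: statements, mnemonic, operands" {
        const source = "  mov rax, rdi; ret\n";
        var line_it = mem.tokenizeAny(u8, source, "\n\r;");

        // First statement: "  mov rax, rdi"
        var mnem_it = mem.tokenizeAny(u8, line_it.next().?, " \t");
        try std.testing.expectEqualStrings("mov", mnem_it.next().?);
        var op_it = mem.tokenizeScalar(u8, mnem_it.rest(), ',');
        try std.testing.expectEqualStrings("rax", mem.trim(u8, op_it.next().?, " \t"));
        try std.testing.expectEqualStrings("rdi", mem.trim(u8, op_it.next().?, " \t"));

        // Second statement: " ret"
        var ret_it = mem.tokenizeAny(u8, line_it.next().?, " \t");
        try std.testing.expectEqualStrings("ret", ret_it.next().?);
        try std.testing.expect(line_it.next() == null);
    }
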
diff --git a/src/glibc.zig b/src/glibc.zig
index 1a8ce5ff157b..4ab00eeed9db 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -109,7 +109,7 @@ pub fn loadMetaData(gpa: Allocator, contents: []const u8) LoadMetaDataError!*ABI
             const target_name = mem.sliceTo(contents[index..], 0);
             index += target_name.len + 1;
 
-            var component_it = mem.tokenize(u8, target_name, "-");
+            var component_it = mem.tokenizeScalar(u8, target_name, '-');
             const arch_name = component_it.next() orelse {
                 log.err("abilists: expected arch name", .{});
                 return error.ZigInstallationCorrupt;
diff --git a/src/libc_installation.zig b/src/libc_installation.zig
index da877e129106..355c3bad8dcd 100644
--- a/src/libc_installation.zig
+++ b/src/libc_installation.zig
@@ -60,10 +60,10 @@ pub const LibCInstallation = struct {
         const contents = try std.fs.cwd().readFileAlloc(allocator, libc_file, std.math.maxInt(usize));
         defer allocator.free(contents);
 
-        var it = std.mem.tokenize(u8, contents, "\n");
+        var it = std.mem.tokenizeScalar(u8, contents, '\n');
         while (it.next()) |line| {
             if (line.len == 0 or line[0] == '#') continue;
-            var line_it = std.mem.split(u8, line, "=");
+            var line_it = std.mem.splitScalar(u8, line, '=');
             const name = line_it.first();
             const value = line_it.rest();
             inline for (fields, 0..) |field, i| {
@@ -293,7 +293,7 @@ pub const LibCInstallation = struct {
             },
         }
 
-        var it = std.mem.tokenize(u8, exec_res.stderr, "\n\r");
+        var it = std.mem.tokenizeAny(u8, exec_res.stderr, "\n\r");
         var search_paths = std.ArrayList([]const u8).init(allocator);
         defer search_paths.deinit();
         while (it.next()) |line| {
@@ -613,7 +613,7 @@ fn ccPrintFileName(args: CCPrintFileNameOptions) ![:0]u8 {
         },
     }
 
-    var it = std.mem.tokenize(u8, exec_res.stdout, "\n\r");
+    var it = std.mem.tokenizeAny(u8, exec_res.stdout, "\n\r");
     const line = it.next() orelse return error.LibCRuntimeNotFound;
     // When this command fails, it returns exit code 0 and duplicates the input file name.
     // So we detect failure by checking if the output matches exactly the input.
@@ -692,7 +692,7 @@ fn appendCcExe(args: *std.ArrayList([]const u8), skip_cc_env_var: bool) !void {
         return;
     };
     // Respect space-separated flags to the C compiler.
-    var it = std.mem.tokenize(u8, cc_env_var, " ");
+    var it = std.mem.tokenizeScalar(u8, cc_env_var, ' ');
     while (it.next()) |arg| {
         try args.append(arg);
     }
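
The libc_installation hunks pick tokenizeScalar when the delimiter is one character ('\n', ' ') and tokenizeAny when it is a set ("\n\r"). A minimal sketch with invented inputs:

    const std = @import("std");
    const mem = std.mem;

    test "single delimiter vs delimiter set" {
        // One delimiter character: tokenizeScalar. Empty entries are skipped.
        var flag_it = mem.tokenizeScalar(u8, "-O2  -g -pipe", ' ');
        try std.testing.expectEqualStrings("-O2", flag_it.next().?);
        try std.testing.expectEqualStrings("-g", flag_it.next().?);
        try std.testing.expectEqualStrings("-pipe", flag_it.next().?);
        try std.testing.expect(flag_it.next() == null);

        // A set of delimiter characters: tokenizeAny.
        var line_it = mem.tokenizeAny(u8, "first\r\nsecond\nthird\r\n", "\n\r");
        try std.testing.expectEqualStrings("first", line_it.next().?);
        try std.testing.expectEqualStrings("second", line_it.next().?);
        try std.testing.expectEqualStrings("third", line_it.next().?);
        try std.testing.expect(line_it.next() == null);
    }
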
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index 863f1e805a36..971706dae628 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -91,7 +91,7 @@ pub const Id = struct {
         var out: u32 = 0;
         var values: [3][]const u8 = undefined;
 
-        var split = mem.split(u8, string, ".");
+        var split = mem.splitScalar(u8, string, '.');
         var count: u4 = 0;
         while (split.next()) |value| {
             if (count > 2) {
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index a785f084cb8f..6d74e17dfd44 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -264,7 +264,7 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void {
 
 fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !void {
     const sep = std.fs.path.sep;
-    var it = std.mem.tokenize(u8, path, &.{sep});
+    var it = std.mem.tokenizeScalar(u8, path, sep);
     while (it.next()) |component| {
         if (self.file_segments.get(component)) |num| {
             try a.writer().writeIntBig(u16, num);
diff --git a/src/main.zig b/src/main.zig
index bc40dbc43117..4acd305ce7df 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -973,7 +973,7 @@ fn buildOutputType(
                         }
                     } else if (mem.eql(u8, arg, "--mod")) {
                         const info = args_iter.nextOrFatal();
-                        var info_it = mem.split(u8, info, ":");
+                        var info_it = mem.splitScalar(u8, info, ':');
                         const mod_name = info_it.next() orelse fatal("expected non-empty argument after {s}", .{arg});
                         const deps_str = info_it.next() orelse fatal("expected 'name:deps:path' after {s}", .{arg});
                         const root_src_orig = info_it.rest();
@@ -1173,7 +1173,7 @@ fn buildOutputType(
                         } else {
                             if (build_options.only_core_functionality) unreachable;
                             // example: --listen 127.0.0.1:9000
-                            var it = std.mem.split(u8, next_arg, ":");
+                            var it = std.mem.splitScalar(u8, next_arg, ':');
                             const host = it.next().?;
                             const port_text = it.next() orelse "14735";
                             const port = std.fmt.parseInt(u16, port_text, 10) catch |err|
@@ -1676,7 +1676,7 @@ fn buildOutputType(
                     },
                     .rdynamic => rdynamic = true,
                     .wl => {
-                        var split_it = mem.split(u8, it.only_arg, ",");
+                        var split_it = mem.splitScalar(u8, it.only_arg, ',');
                         while (split_it.next()) |linker_arg| {
                             // Handle nested-joined args like `-Wl,-rpath=foo`.
                             // Must be prefixed with 1 or 2 dashes.
@@ -2191,17 +2191,17 @@ fn buildOutputType(
                     const next_arg = linker_args_it.nextOrFatal();
                     try symbol_wrap_set.put(arena, next_arg, {});
                 } else if (mem.startsWith(u8, arg, "/subsystem:")) {
-                    var split_it = mem.splitBackwards(u8, arg, ":");
+                    var split_it = mem.splitBackwardsScalar(u8, arg, ':');
                     subsystem = try parseSubSystem(split_it.first());
                 } else if (mem.startsWith(u8, arg, "/implib:")) {
-                    var split_it = mem.splitBackwards(u8, arg, ":");
+                    var split_it = mem.splitBackwardsScalar(u8, arg, ':');
                     emit_implib = .{ .yes = split_it.first() };
                     emit_implib_arg_provided = true;
                 } else if (mem.startsWith(u8, arg, "/pdb:")) {
-                    var split_it = mem.splitBackwards(u8, arg, ":");
+                    var split_it = mem.splitBackwardsScalar(u8, arg, ':');
                     pdb_out_path = split_it.first();
                 } else if (mem.startsWith(u8, arg, "/version:")) {
-                    var split_it = mem.splitBackwards(u8, arg, ":");
+                    var split_it = mem.splitBackwardsScalar(u8, arg, ':');
                     const version_arg = split_it.first();
                     version = std.builtin.Version.parse(version_arg) catch |err| {
                         fatal("unable to parse /version '{s}': {s}", .{ arg, @errorName(err) });
@@ -3541,10 +3541,10 @@ fn serveUpdateResults(s: *Server, comp: *Compilation) !void {
 }
 
 const ModuleDepIterator = struct {
-    split: mem.SplitIterator(u8),
+    split: mem.SplitIterator(u8, .scalar),
 
     fn init(deps_str: []const u8) ModuleDepIterator {
-        return .{ .split = mem.split(u8, deps_str, ",") };
+        return .{ .split = mem.splitScalar(u8, deps_str, ',') };
     }
 
     const Dependency = struct {
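
Because SplitIterator and TokenIterator are now parameterized by delimiter kind, a struct that stores one (as ModuleDepIterator does above) has to name the kind matching its constructor. A minimal sketch; DepIterator is an illustrative name, not part of the patch:

    const std = @import("std");
    const mem = std.mem;

    const DepIterator = struct {
        // The .scalar kind matches mem.splitScalar below.
        split: mem.SplitIterator(u8, .scalar),

        fn init(deps: []const u8) DepIterator {
            return .{ .split = mem.splitScalar(u8, deps, ',') };
        }

        fn next(self: *DepIterator) ?[]const u8 {
            return self.split.next();
        }
    };

    test "iterator stored in a struct field" {
        var it = DepIterator.init("foo,bar,baz");
        try std.testing.expectEqualStrings("foo", it.next().?);
        try std.testing.expectEqualStrings("bar", it.next().?);
        try std.testing.expectEqualStrings("baz", it.next().?);
        try std.testing.expect(it.next() == null);
    }
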
diff --git a/src/print_zir.zig b/src/print_zir.zig
index f67cb55ba981..cfa68424d0bb 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -2588,7 +2588,7 @@ const Writer = struct {
     fn writeDocComment(self: *Writer, stream: anytype, doc_comment_index: u32) !void {
         if (doc_comment_index != 0) {
             const doc_comment = self.code.nullTerminatedString(doc_comment_index);
-            var it = std.mem.tokenize(u8, doc_comment, "\n");
+            var it = std.mem.tokenizeScalar(u8, doc_comment, '\n');
             while (it.next()) |doc_line| {
                 try stream.writeByteNTimes(' ', self.indent);
                 try stream.print("///{s}\n", .{doc_line});
diff --git a/test/behavior/bugs/6456.zig b/test/behavior/bugs/6456.zig
index 1eef9c7f7547..297c9c742380 100644
--- a/test/behavior/bugs/6456.zig
+++ b/test/behavior/bugs/6456.zig
@@ -18,7 +18,7 @@ test "issue 6456" {
     comptime {
         var fields: []const StructField = &[0]StructField{};
 
-        var it = std.mem.tokenize(u8, text, "\n");
+        var it = std.mem.tokenizeScalar(u8, text, '\n');
         while (it.next()) |name| {
             fields = fields ++ &[_]StructField{StructField{
                 .alignment = 0,
diff --git a/test/src/Cases.zig b/test/src/Cases.zig
index e56d9ad20144..48b1fd459ddf 100644
--- a/test/src/Cases.zig
+++ b/test/src/Cases.zig
@@ -804,7 +804,7 @@ const TestManifest = struct {
     };
 
     const TrailingIterator = struct {
-        inner: std.mem.TokenIterator(u8),
+        inner: std.mem.TokenIterator(u8, .any),
 
         fn next(self: *TrailingIterator) ?[]const u8 {
             const next_inner = self.inner.next() orelse return null;
@@ -814,7 +814,7 @@ const TestManifest = struct {
 
     fn ConfigValueIterator(comptime T: type) type {
         return struct {
-            inner: std.mem.SplitIterator(u8),
+            inner: std.mem.SplitIterator(u8, .scalar),
 
             fn next(self: *@This()) !?T {
                 const next_raw = self.inner.next() orelse return null;
@@ -855,7 +855,7 @@ const TestManifest = struct {
         const actual_start = start orelse return error.MissingTestManifest;
         const manifest_bytes = bytes[actual_start..end];
 
-        var it = std.mem.tokenize(u8, manifest_bytes, "\r\n");
+        var it = std.mem.tokenizeAny(u8, manifest_bytes, "\r\n");
 
         // First line is the test type
         const tt: Type = blk: {
@@ -886,7 +886,7 @@ const TestManifest = struct {
             if (trimmed.len == 0) break;
 
             // Parse key=value(s)
-            var kv_it = std.mem.split(u8, trimmed, "=");
+            var kv_it = std.mem.splitScalar(u8, trimmed, '=');
             const key = kv_it.first();
             try manifest.config_map.putNoClobber(key, kv_it.next() orelse return error.MissingValuesForConfig);
         }
@@ -904,7 +904,7 @@ const TestManifest = struct {
     ) ConfigValueIterator(T) {
         const bytes = self.config_map.get(key) orelse TestManifestConfigDefaults.get(self.type, key);
         return ConfigValueIterator(T){
-            .inner = std.mem.split(u8, bytes, ","),
+            .inner = std.mem.splitScalar(u8, bytes, ','),
         };
     }
 
@@ -932,7 +932,7 @@ const TestManifest = struct {
 
     fn trailing(self: TestManifest) TrailingIterator {
         return .{
-            .inner = std.mem.tokenize(u8, self.trailing_bytes, "\r\n"),
+            .inner = std.mem.tokenizeAny(u8, self.trailing_bytes, "\r\n"),
         };
     }
 
@@ -1408,7 +1408,7 @@ fn runOneCase(
                 // Render the expected lines into a string that we can compare verbatim.
                 var expected_generated = std.ArrayList(u8).init(arena);
 
-                var actual_line_it = std.mem.split(u8, actual_stderr.items, "\n");
+                var actual_line_it = std.mem.splitScalar(u8, actual_stderr.items, '\n');
                 for (expected_errors) |expect_line| {
                     const actual_line = actual_line_it.next() orelse {
                         try expected_generated.appendSlice(expect_line);
diff --git a/test/src/check-stack-trace.zig b/test/src/check-stack-trace.zig
index bb1db55076e0..9856b5738e5a 100644
--- a/test/src/check-stack-trace.zig
+++ b/test/src/check-stack-trace.zig
@@ -27,7 +27,7 @@ pub fn main() !void {
         var buf = std.ArrayList(u8).init(arena);
         defer buf.deinit();
         if (stderr.len != 0 and stderr[stderr.len - 1] == '\n') stderr = stderr[0 .. stderr.len - 1];
-        var it = mem.split(u8, stderr, "\n");
+        var it = mem.splitScalar(u8, stderr, '\n');
         process_lines: while (it.next()) |line| {
             if (line.len == 0) continue;
 
diff --git a/tools/gen_outline_atomics.zig b/tools/gen_outline_atomics.zig
index c04591d03279..0cfdacffd1a3 100644
--- a/tools/gen_outline_atomics.zig
+++ b/tools/gen_outline_atomics.zig
@@ -88,7 +88,7 @@ fn writeFunction(
         \\    asm volatile (
         \\
     );
-    var iter = std.mem.split(u8, body, "\n");
+    var iter = std.mem.splitScalar(u8, body, '\n');
     while (iter.next()) |line| {
         try w.writeAll("        \\\\");
         try w.writeAll(line);
diff --git a/tools/generate_linux_syscalls.zig b/tools/generate_linux_syscalls.zig
index 11b18ae3bf5a..32e287b43403 100644
--- a/tools/generate_linux_syscalls.zig
+++ b/tools/generate_linux_syscalls.zig
@@ -51,11 +51,11 @@ pub fn main() !void {
         try writer.writeAll("pub const X86 = enum(usize) {\n");
 
         const table = try linux_dir.readFile("arch/x86/entry/syscalls/syscall_32.tbl", buf);
-        var lines = mem.tokenize(u8, table, "\n");
+        var lines = mem.tokenizeScalar(u8, table, '\n');
         while (lines.next()) |line| {
             if (line[0] == '#') continue;
 
-            var fields = mem.tokenize(u8, line, " \t");
+            var fields = mem.tokenizeAny(u8, line, " \t");
             const number = fields.next() orelse return error.Incomplete;
             // abi is always i386
             _ = fields.next() orelse return error.Incomplete;
@@ -70,11 +70,11 @@ pub fn main() !void {
         try writer.writeAll("pub const X64 = enum(usize) {\n");
 
         const table = try linux_dir.readFile("arch/x86/entry/syscalls/syscall_64.tbl", buf);
-        var lines = mem.tokenize(u8, table, "\n");
+        var lines = mem.tokenizeScalar(u8, table, '\n');
         while (lines.next()) |line| {
             if (line[0] == '#') continue;
 
-            var fields = mem.tokenize(u8, line, " \t");
+            var fields = mem.tokenizeAny(u8, line, " \t");
             const number = fields.next() orelse return error.Incomplete;
             const abi = fields.next() orelse return error.Incomplete;
             // The x32 abi syscalls are always at the end.
@@ -96,11 +96,11 @@ pub fn main() !void {
         );
 
         const table = try linux_dir.readFile("arch/arm/tools/syscall.tbl", buf);
-        var lines = mem.tokenize(u8, table, "\n");
+        var lines = mem.tokenizeScalar(u8, table, '\n');
         while (lines.next()) |line| {
             if (line[0] == '#') continue;
 
-            var fields = mem.tokenize(u8, line, " \t");
+            var fields = mem.tokenizeAny(u8, line, " \t");
             const number = fields.next() orelse return error.Incomplete;
             const abi = fields.next() orelse return error.Incomplete;
             if (mem.eql(u8, abi, "oabi")) continue;
@@ -127,11 +127,11 @@ pub fn main() !void {
     {
         try writer.writeAll("pub const Sparc64 = enum(usize) {\n");
         const table = try linux_dir.readFile("arch/sparc/kernel/syscalls/syscall.tbl", buf);
-        var lines = mem.tokenize(u8, table, "\n");
+        var lines = mem.tokenizeScalar(u8, table, '\n');
         while (lines.next()) |line| {
             if (line[0] == '#') continue;
 
-            var fields = mem.tokenize(u8, line, " \t");
+            var fields = mem.tokenizeAny(u8, line, " \t");
             const number = fields.next() orelse return error.Incomplete;
             const abi = fields.next() orelse return error.Incomplete;
             if (mem.eql(u8, abi, "32")) continue;
@@ -151,11 +151,11 @@ pub fn main() !void {
         );
 
         const table = try linux_dir.readFile("arch/mips/kernel/syscalls/syscall_o32.tbl", buf);
-        var lines = mem.tokenize(u8, table, "\n");
+        var lines = mem.tokenizeScalar(u8, table, '\n');
         while (lines.next()) |line| {
             if (line[0] == '#') continue;
 
-            var fields = mem.tokenize(u8, line, " \t");
+            var fields = mem.tokenizeAny(u8, line, " \t");
             const number = fields.next() orelse return error.Incomplete;
             // abi is always o32
             _ = fields.next() orelse return error.Incomplete;
@@ -176,11 +176,11 @@ pub fn main() !void {
         );
 
         const table = try linux_dir.readFile("arch/mips/kernel/syscalls/syscall_n64.tbl", buf);
-        var lines = mem.tokenize(u8, table, "\n");
+        var lines = mem.tokenizeScalar(u8, table, '\n');
         while (lines.next()) |line| {
             if (line[0] == '#') continue;
 
-            var fields = mem.tokenize(u8, line, " \t");
+            var fields = mem.tokenizeAny(u8, line, " \t");
             const number = fields.next() orelse return error.Incomplete;
             // abi is always n64
             _ = fields.next() orelse return error.Incomplete;
@@ -197,11 +197,11 @@ pub fn main() !void {
 
         const table = try linux_dir.readFile("arch/powerpc/kernel/syscalls/syscall.tbl", buf);
         var list_64 = std.ArrayList(u8).init(allocator);
-        var lines = mem.tokenize(u8, table, "\n");
+        var lines = mem.tokenizeScalar(u8, table, '\n');
         while (lines.next()) |line| {
             if (line[0] == '#') continue;
 
-            var fields = mem.tokenize(u8, line, " \t");
+            var fields = mem.tokenizeAny(u8, line, " \t");
             const number = fields.next() orelse return error.Incomplete;
             const abi = fields.next() orelse return error.Incomplete;
             const name = fields.next() orelse return error.Incomplete;
@@ -277,9 +277,9 @@ pub fn main() !void {
             },
         };
 
-        var lines = mem.tokenize(u8, defines, "\n");
+        var lines = mem.tokenizeScalar(u8, defines, '\n');
         loop: while (lines.next()) |line| {
-            var fields = mem.tokenize(u8, line, " \t");
+            var fields = mem.tokenizeAny(u8, line, " \t");
             const cmd = fields.next() orelse return error.Incomplete;
             if (!mem.eql(u8, cmd, "#define")) continue;
             const define = fields.next() orelse return error.Incomplete;
@@ -339,9 +339,9 @@ pub fn main() !void {
             },
         };
 
-        var lines = mem.tokenize(u8, defines, "\n");
+        var lines = mem.tokenizeScalar(u8, defines, '\n');
         loop: while (lines.next()) |line| {
-            var fields = mem.tokenize(u8, line, " \t");
+            var fields = mem.tokenizeAny(u8, line, " \t");
             const cmd = fields.next() orelse return error.Incomplete;
             if (!mem.eql(u8, cmd, "#define")) continue;
             const define = fields.next() orelse return error.Incomplete;
diff --git a/tools/update_crc_catalog.zig b/tools/update_crc_catalog.zig
index 034b7afc9dd8..81b20b7dfa4f 100644
--- a/tools/update_crc_catalog.zig
+++ b/tools/update_crc_catalog.zig
@@ -78,7 +78,7 @@ pub fn main() anyerror!void {
         var residue: []const u8 = undefined;
         var name: []const u8 = undefined;
 
-        var it = mem.split(u8, line, "  ");
+        var it = mem.splitSequence(u8, line, "  ");
         while (it.next()) |property| {
             const i = mem.indexOf(u8, property, "=").?;
             const key = property[0..i];
diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig
index 31bdf40ef70d..6a923cf72a03 100644
--- a/tools/update_spirv_features.zig
+++ b/tools/update_spirv_features.zig
@@ -19,7 +19,7 @@ const Version = struct {
     minor: u32,
 
     fn parse(str: []const u8) !Version {
-        var it = std.mem.split(u8, str, ".");
+        var it = std.mem.splitScalar(u8, str, '.');
 
         const major = it.first();
         const minor = it.next() orelse return error.InvalidVersion;