Ztorch API Reference

Complete API documentation for Ztorch v0.1.

Core Types

Tensor

The fundamental data structure.

pub const Tensor = struct {
    data: DevicePtr,
    shape: Shape,
    stride: Stride,
    dtype: DType,
    device: Device,
    requires_grad: bool,
};
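
The fields can be read directly. A minimal sketch of inspecting tensor metadata (assumes std is imported as in the training example below; the creation call is taken from the section that follows):

const t = try Tensor.zeros(.{32, 128}, .f32, .cpu);
defer t.deinit();

std.debug.print("ndim={} numel={} dtype={s} requires_grad={}\n", .{
    t.shape.ndim,
    t.shape.numel(),
    @tagName(t.dtype),
    t.requires_grad,
});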

Creation

// Zeros
const t = try Tensor.zeros(.{32, 128}, .f32, .cpu);

// Ones
const t = try Tensor.ones(.{32, 128}, .f32, .cpu);

// Random normal distribution
const t = try Tensor.randn(.{32, 128}, .f32, .cpu);

// Random integers [0, high)
const t = try Tensor.randint(.{32}, 10, .cpu);

// From slice
const data = [_]f32{ 1, 2, 3, 4 };
const t = try Tensor.fromSlice(.{2, 2}, &data, .cpu);

Operations

// Reshape (view, no copy)
const reshaped = try t.reshape(.{64, 64});

// Transpose
const transposed = try t.transpose();

// Get single item (must be 1-element tensor)
const value: f32 = t.item();

// To slice (copies to CPU)
var buffer: [4]f32 = undefined;
try t.toSlice(&buffer);

Shape

pub const Shape = struct {
    dims: [MAX_DIMS]usize,
    ndim: u8,
};

// Create shape
const shape = Shape.init(&[_]usize{ 32, 128, 256 });

// Number of elements
const n = shape.numel(); // 32 * 128 * 256

Device

pub const Device = enum {
    cpu,
    cuda,
    rocm,
    vulkan,
};

DType

pub const DType = enum {
    f32,
    f64,
    i32,
    i64,
    // more types in future versions
};
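
Both the dtype and the device are selected at tensor-creation time. A small sketch (CPU only; whether every dtype/device pairing is supported in v0.1 is not specified here):

// 64-bit float tensor on the CPU
const t64 = try Tensor.zeros(.{16, 16}, .f64, .cpu);
defer t64.deinit();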

Model Definition

Sequential

Build a sequential model from layers.

const Model = ztorch.Sequential(.{
    ztorch.Linear(784, 128),
    ztorch.ReLU(),
    ztorch.Linear(128, 64),
    ztorch.ReLU(),
    ztorch.Linear(64, 10),
});

Layers

Linear

Fully connected layer: y = x @ W.T + b

pub fn Linear(comptime in_features: usize, comptime out_features: usize) type

Example:

ztorch.Linear(784, 128) // 784 → 128

Activations

ReLU: y = max(0, x)

ztorch.ReLU()

GELU: Gaussian Error Linear Unit

ztorch.GELU()

Softmax: y[i] = exp(x[i]) / sum(exp(x))

ztorch.Softmax(.{.dim = -1}) // softmax over last dimension

Normalization

LayerNorm:

ztorch.LayerNorm(normalized_shape, .{
    .eps = 1e-5,
    .elementwise_affine = true,
})
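
Normalization layers are declared inside a Sequential like any other layer. A hypothetical sketch, passing the hidden width 128 as normalized_shape (that LayerNorm and Softmax compose this way in v0.1 is an assumption, not confirmed by the examples above):

const Model = ztorch.Sequential(.{
    ztorch.Linear(784, 128),
    ztorch.LayerNorm(128, .{ .eps = 1e-5, .elementwise_affine = true }),
    ztorch.GELU(),
    ztorch.Linear(128, 10),
    ztorch.Softmax(.{ .dim = -1 }),
});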

Utilities

Data Loading

Helpers for turning binary dumps into tensors, designed to pair with the MNIST preparation script.

const cwd = std.fs.cwd();
var images = try ztorch.data.mnist.loadImages(cwd, "data/mnist_train_x.bin", allocator, .{
    .max_samples = 1024, // 0 = entire file
});
defer images.deinit();

var labels = try ztorch.data.mnist.loadLabels(cwd, "data/mnist_train_y.bin", images.shape.dims[0], allocator, .{});
defer labels.deinit();

var iter = try ztorch.data.BatchIterator.init(&images, &labels, allocator, .{
    .batch_size = 128,
    .shuffle = true,
    .seed = 42,
});
defer iter.deinit();

while (try iter.next()) |batch| {
    var owned = batch;
    defer owned.deinit();
    // use owned.inputs / owned.labels.? tensors
}
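
A hedged sketch of wiring a batch into the training step (assumes a compiled model as in the Compilation section, and that owned.inputs and owned.labels.? are Tensor values accepted directly by forward and crossEntropy):

while (try iter.next()) |batch| {
    var owned = batch;
    defer owned.deinit();

    const output = try model.forward(owned.inputs);
    const loss = try ztorch.crossEntropy(output, owned.labels.?);
    try model.backward(loss);
    try model.step(.{ .sgd = .{ .lr = 0.01 } });
}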

Parameter Initializers

Create trainable parameter tensors with requires_grad already enabled.

var weights = try ztorch.init.uniformParam(&[_]usize{ 784, 128 }, 0.05, allocator, 1);
var bias = try ztorch.init.zerosParam(&[_]usize{ 1, 128 }, allocator);

Metrics

Compute classification accuracy from logits or probabilities.

const train_acc = try ztorch.metrics.accuracyFromLogits(&logits, &labels, allocator);

var probs = try ztorch.ops.activations.softmax_cpu_scalar(&logits, -1, allocator);
defer probs.deinit();
const val_acc = try ztorch.metrics.accuracyFromProbabilities(&probs, &labels);

Checkpointing

Persist model parameters to disk and restore them later.

const entries = [_]ztorch.checkpoint.NamedTensor{
    .{ .name = "w1", .tensor = &w1 },
    .{ .name = "b1", .tensor = &b1 },
};

try ztorch.checkpoint.save("model.ckpt", entries[0..]);

var checkpoint = try ztorch.checkpoint.load("model.ckpt", allocator);
defer checkpoint.deinit();

const weights = checkpoint.get("w1") orelse unreachable;

MNIST Helpers

Utility functions for the built-in two-layer MNIST classifier.

const accuracy = try ztorch.models.mnist.evaluate(
    &test_images,
    &test_labels,
    &w1,
    &b1,
    &w2,
    &b2,
    allocator,
);

Compilation

pub fn compile(
    comptime self: anytype,
    comptime backend: Device,
    allocator: Allocator,
) !CompiledModel

Example:

const Model = ztorch.Sequential(.{
    ztorch.Linear(784, 10),
});

var model = try Model.compile(.cpu, allocator);
defer model.deinit();

Forward Pass

pub fn forward(self: *CompiledModel, input: Tensor) !Tensor

Example:

const input = try Tensor.randn(.{32, 784}, .f32, .cpu);
const output = try model.forward(input);

Loss Functions

Cross Entropy

pub fn crossEntropy(predictions: Tensor, targets: Tensor) !Tensor

Example:

const output = try model.forward(input);
const loss = try ztorch.crossEntropy(output, labels);

Mean Squared Error

pub fn mse(predictions: Tensor, targets: Tensor) !Tensor
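
Example (mirrors the crossEntropy usage above; targets is assumed to be a float tensor with the same shape as the predictions):

const output = try model.forward(input);
const loss = try ztorch.mse(output, targets);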

Backward Pass

pub fn backward(self: *CompiledModel, loss: Tensor) !void

Computes gradients for all parameters with requires_grad = true.

Example:

const loss = try ztorch.crossEntropy(output, labels);
try model.backward(loss);

Optimization

Optimizer Step

pub fn step(self: *CompiledModel, config: OptimizerConfig) !void

pub const OptimizerConfig = union(enum) {
    sgd: SGDConfig,
    adam: AdamConfig,
};

SGD

pub const SGDConfig = struct {
    lr: f32,
    momentum: f32 = 0.0,
    weight_decay: f32 = 0.0,
};

try model.step(.{ .sgd = .{ .lr = 0.01, .momentum = 0.9 } });

Adam

pub const AdamConfig = struct {
    lr: f32,
    beta1: f32 = 0.9,
    beta2: f32 = 0.999,
    eps: f32 = 1e-8,
    weight_decay: f32 = 0.0,
};

try model.step(.{ .adam = .{ .lr = 0.001 } });

Complete Training Example

const std = @import("std");
const ztorch = @import("ztorch");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    // Define model
    const Model = ztorch.Sequential(.{
        ztorch.Linear(784, 128),
        ztorch.ReLU(),
        ztorch.Linear(128, 64),
        ztorch.ReLU(),
        ztorch.Linear(64, 10),
    });

    // Compile
    var model = try Model.compile(.cpu, allocator);
    defer model.deinit();

    // Training loop
    const epochs = 10;
    const batch_size = 32;

    for (0..epochs) |epoch| {
        var total_loss: f32 = 0;
        var num_batches: usize = 0;

        // ... load batch ...
        const input = try ztorch.Tensor.randn(.{batch_size, 784}, .f32, .cpu);
        defer input.deinit();
        const labels = try ztorch.Tensor.randint(.{batch_size}, 10, .cpu);
        defer labels.deinit();

        // Forward
        const output = try model.forward(input);

        // Loss
        const loss = try ztorch.crossEntropy(output, labels);
        total_loss += loss.item();
        num_batches += 1;

        // Backward
        try model.backward(loss);

        // Update
        try model.step(.{ .adam = .{ .lr = 0.001 } });

        std.debug.print("Epoch {}: Loss = {d:.4}\n", .{
            epoch,
            total_loss / @as(f32, @floatFromInt(num_batches)),
        });
    }
}

Backend-Specific Features

CUDA

// Use CUDA backend
var model = try Model.compile(.cuda, allocator);

// CUDA-specific device selection (future)
// ztorch.cuda.setDevice(0);

Memory Management

// Models manage their own memory
var model = try Model.compile(.cpu, allocator);
defer model.deinit(); // Frees all tensors

// Explicit tensor lifetime
const t = try Tensor.zeros(.{100}, .f32, .cpu);
defer t.deinit();

Error Handling

All fallible operations return errors drawn from this set:

pub const Error = error{
    OutOfMemory,
    DeviceError,
    ShapeMismatch,
    InvalidDType,
    InvalidDevice,
    BackendNotSupported,
};

Example:

const output = model.forward(input) catch |err| {
    std.debug.print("Forward pass failed: {}\n", .{err});
    return err;
};
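
Because the error set is explicit, callers can also switch on individual errors. A sketch (the message and recovery choices here are placeholders, not library behavior):

const output = model.forward(input) catch |err| switch (err) {
    error.ShapeMismatch => {
        std.debug.print("input shape does not match the model\n", .{});
        return err;
    },
    else => return err,
};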