diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml
new file mode 100644
index 0000000..ce14040
--- /dev/null
+++ b/.github/workflows/pages.yml
@@ -0,0 +1,57 @@
+name: GitHub Pages
+
+on:
+  push:
+    branches:
+      - main
+      - docs # temporary for testing
+
+# Allow only one concurrent deployment, skipping runs queued between the in-progress run and the latest queued run.
+# However, do NOT cancel in-progress runs, as we want to allow these production deployments to complete.
+concurrency:
+  group: pages
+  cancel-in-progress: false
+
+jobs:
+  build-docs:
+    name: Build Documentation
+
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Setup .NET
+        uses: actions/setup-dotnet@v4
+        with:
+          dotnet-version: 9.0.x
+
+      - name: Install docfx
+        run: dotnet tool update -g docfx
+      - name: Build documentation
+        run: docfx docfx.json
+
+      - name: Upload artifact
+        uses: actions/upload-pages-artifact@v3
+        with:
+          path: 'artifacts/_site'
+
+  publish-docs:
+    name: Publish Documentation
+    needs: build-docs
+
+    # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+    permissions:
+      actions: read
+      pages: write
+      id-token: write
+
+    # Deploy to the github-pages environment
+    environment:
+      name: github-pages
+      url: ${{ steps.deployment.outputs.page_url }}
+
+    runs-on: ubuntu-latest
+    steps:
+      - name: Deploy to GitHub Pages
+        id: deployment
+        uses: actions/deploy-pages@v4
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 8fec8cd..645b2fb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,3 +24,4 @@ BenchmarkDotNet.Artifacts/
test-results/
TestResults/
.DS_Store
+/api/
\ No newline at end of file
diff --git a/Snappier/Snappier.csproj b/Snappier/Snappier.csproj
index 298cbdf..f17e334 100644
--- a/Snappier/Snappier.csproj
+++ b/Snappier/Snappier.csproj
@@ -44,7 +44,7 @@
-
+
diff --git a/docfx.json b/docfx.json
new file mode 100644
index 0000000..d3c9164
--- /dev/null
+++ b/docfx.json
@@ -0,0 +1,53 @@
+{
+  "$schema": "https://raw.githubusercontent.com/dotnet/docfx/main/schemas/docfx.schema.json",
+  "metadata": [
+    {
+      "src": [
+        {
+          "src": "./Snappier",
+          "files": [
+            "**/*.csproj"
+          ]
+        }
+      ],
+      "dest": "api",
+      "properties": {
+        "TargetFramework": "net8.0"
+      }
+    }
+  ],
+  "build": {
+    "content": [
+      {
+        "files": [
+          "**/*.{md,yml}"
+        ],
+        "exclude": [
+          "_site/**",
+          "artifacts/**",
+          "**/BenchmarkDotNet.Artifacts/**"
+        ]
+      }
+    ],
+    "resource": [
+      {
+        "files": [
+          "images/**"
+        ]
+      }
+    ],
+    "output": "artifacts/_site",
+    "template": [
+      "default",
+      "material/material"
+    ],
+    "globalMetadata": {
+      "_appName": "Snappier",
+      "_appTitle": "Snappier",
+      "_appLogoPath": "images/icon-48.png",
+      "_disableContribution": true,
+      "_enableSearch": true,
+      "pdf": false
+    }
+  }
+}
\ No newline at end of file
diff --git a/docs/block.md b/docs/block.md
new file mode 100644
index 0000000..a4fd12d
--- /dev/null
+++ b/docs/block.md
@@ -0,0 +1,129 @@
+# Block Compression
+
+Block compression is ideal for data up to 64KB, though it may be used for data of any size. It does not include any stream
+framing or CRC validation. It also doesn't automatically revert to uncompressed data in the event of data size growth.
+
+## Block compression/decompression using a buffer you already own
+
+```cs
+using Snappier;
+
+public class Program
+{
+    private static byte[] Data = {0, 1, 2}; // Wherever you get the data from
+
+    public static void Main()
+    {
+        // This option assumes that you are managing buffers yourself in an efficient way.
+        // In this example, we're using heap-allocated byte arrays; however, in most cases
+        // you would get these buffers from a buffer pool like ArrayPool<byte> or MemoryPool<byte>.
+
+        // If the output buffer is too small, an ArgumentException is thrown. This will not
+        // occur in this example because a sufficient buffer is always allocated via
+        // Snappy.GetMaxCompressedLength or Snappy.GetUncompressedLength. There are TryCompress
+        // and TryDecompress overloads that return false if the output buffer is too small
+        // rather than throwing an exception.
+
+        // Compression
+        byte[] buffer = new byte[Snappy.GetMaxCompressedLength(Data)];
+        int compressedLength = Snappy.Compress(Data, buffer);
+        Span<byte> compressed = buffer.AsSpan(0, compressedLength);
+
+        // Decompression
+        byte[] outputBuffer = new byte[Snappy.GetUncompressedLength(compressed)];
+        int decompressedLength = Snappy.Decompress(compressed, outputBuffer);
+
+        for (var i = 0; i < decompressedLength; i++)
+        {
+            // Do something with the data
+        }
+    }
+}
+```
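+
+As the comments above note, pooled buffers are usually preferable to fresh heap allocations. Below is a minimal sketch of the compression step using `ArrayPool<byte>.Shared`; the pooling strategy and the `CompressPooled` helper are illustrative choices, not part of the Snappier API.
+
+```cs
+using System;
+using System.Buffers;
+using Snappier;
+
+public static class PooledExample
+{
+    // Hypothetical helper: compress input using a rented, worst-case-sized buffer
+    public static byte[] CompressPooled(ReadOnlySpan<byte> input)
+    {
+        byte[] buffer = ArrayPool<byte>.Shared.Rent(Snappy.GetMaxCompressedLength(input));
+        try
+        {
+            int compressedLength = Snappy.Compress(input, buffer);
+
+            // Copy out just the compressed bytes before returning the pooled buffer
+            return buffer.AsSpan(0, compressedLength).ToArray();
+        }
+        finally
+        {
+            ArrayPool<byte>.Shared.Return(buffer);
+        }
+    }
+}
+```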
+
+## Block compression/decompression using a memory pool buffer
+
+```cs
+using System.Buffers;
+using Snappier;
+
+public class Program
+{
+    private static byte[] Data = {0, 1, 2}; // Wherever you get the data from
+
+    public static void Main()
+    {
+        // This option uses `MemoryPool<byte>.Shared`. However, if you fail to
+        // dispose of the returned buffers correctly it can result in inefficient garbage collection.
+        // It is important to either call .Dispose() or use a using statement.
+
+        // Compression
+        using (IMemoryOwner<byte> compressed = Snappy.CompressToMemory(Data))
+        {
+            // Decompression
+            using (IMemoryOwner<byte> decompressed = Snappy.DecompressToMemory(compressed.Memory.Span))
+            {
+                // Do something with the data
+            }
+        }
+    }
+}
+```
+
+## Block compression/decompression using a buffer writer
+
+```cs
+using Snappier;
+using System.Buffers;
+
+public class Program
+{
+    private static byte[] Data = {0, 1, 2}; // Wherever you get the data from
+
+    public static void Main()
+    {
+        // This option uses `IBufferWriter<byte>`. In .NET 6 you can get a simple
+        // implementation such as `ArrayBufferWriter<byte>`, but it may also be a `PipeWriter`
+        // or any other more advanced implementation of `IBufferWriter<byte>`.
+
+        // These overloads also accept a `ReadOnlySequence<byte>`, which allows the source data
+        // to be made up of buffer segments rather than one large buffer. However, segment size
+        // may be a factor in performance. For compression, segments that are some multiple of
+        // 64KB are recommended. For decompression, simply avoid small segments.
+
+        // Compression
+        var compressedBufferWriter = new ArrayBufferWriter<byte>();
+        Snappy.Compress(new ReadOnlySequence<byte>(Data), compressedBufferWriter);
+        var compressedData = compressedBufferWriter.WrittenMemory;
+
+        // Decompression
+        var decompressedBufferWriter = new ArrayBufferWriter<byte>();
+        Snappy.Decompress(new ReadOnlySequence<byte>(compressedData), decompressedBufferWriter);
+        var decompressedData = decompressedBufferWriter.WrittenMemory;
+
+        // Do something with the data
+    }
+}
+```
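+
+Because `PipeWriter` implements `IBufferWriter<byte>`, the same overloads can write compressed data directly to a pipe. A brief sketch, assuming you already have a `PipeWriter` from elsewhere (the `CompressToPipeAsync` helper is illustrative, not part of the Snappier API):
+
+```cs
+using System;
+using System.Buffers;
+using System.IO.Pipelines;
+using System.Threading.Tasks;
+using Snappier;
+
+public static class PipeExample
+{
+    public static async Task CompressToPipeAsync(ReadOnlyMemory<byte> source, PipeWriter writer)
+    {
+        // PipeWriter is an IBufferWriter<byte>, so it can receive the compressed output directly
+        Snappy.Compress(new ReadOnlySequence<byte>(source), writer);
+
+        // Make the compressed bytes available to the reading side of the pipe
+        await writer.FlushAsync();
+    }
+}
+```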
+
+## Block compression/decompression using heap allocated byte[]
+
+```cs
+using Snappier;
+
+public class Program
+{
+    private static byte[] Data = {0, 1, 2}; // Wherever you get the data from
+
+    public static void Main()
+    {
+        // This is generally the least efficient option,
+        // but in some cases may be the simplest to implement.
+
+        // Compression
+        byte[] compressed = Snappy.CompressToArray(Data);
+
+        // Decompression
+        byte[] decompressed = Snappy.DecompressToArray(compressed);
+    }
+}
+```
\ No newline at end of file
diff --git a/docs/getting-started.md b/docs/getting-started.md
new file mode 100644
index 0000000..3be18ca
--- /dev/null
+++ b/docs/getting-started.md
@@ -0,0 +1,15 @@
+# Getting Started
+
+## Installing
+
+Simply add a NuGet package reference to the latest version of Snappier.
+
+```xml
+<PackageReference Include="Snappier" Version="1.*" />
+```
+
+or
+
+```sh
+dotnet add package Snappier
+```
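+
+## Quick example
+
+As a quick sanity check after installing, here is a minimal round trip using the array-based helpers (covered in more depth under Block Compression):
+
+```cs
+using System;
+using System.Text;
+using Snappier;
+
+byte[] input = Encoding.UTF8.GetBytes("Hello, Snappier!");
+
+// Compress to a new array, then decompress back
+byte[] compressed = Snappy.CompressToArray(input);
+byte[] decompressed = Snappy.DecompressToArray(compressed);
+
+Console.WriteLine(Encoding.UTF8.GetString(decompressed)); // Hello, Snappier!
+```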
diff --git a/docs/stream.md b/docs/stream.md
new file mode 100644
index 0000000..a8213c0
--- /dev/null
+++ b/docs/stream.md
@@ -0,0 +1,49 @@
+# Stream Compression
+
+Stream compression reads or writes the [Snappy framing format](https://github.com/google/snappy/blob/master/framing_format.txt) designed for streaming.
+It is ideal for data being sent over a network stream, and includes additional framing data and CRC validation.
+It also recognizes when an individual block in the stream compresses poorly and will include it in uncompressed form.
+
+## Stream compression/decompression
+
+Compressing or decompressing a stream follows the same paradigm as other compression streams in .NET: `SnappyStream` wraps an inner stream. When decompressing you read from the `SnappyStream`; when compressing you write to it.
+
+```cs
+using System.IO;
+using System.IO.Compression;
+using System.Threading.Tasks;
+using Snappier;
+
+public class Program
+{
+    public static async Task Main()
+    {
+        using var fileStream = File.OpenRead("somefile.txt");
+
+        // First, compression
+        using var compressed = new MemoryStream();
+
+        using (var compressor = new SnappyStream(compressed, CompressionMode.Compress, leaveOpen: true))
+        {
+            await fileStream.CopyToAsync(compressor);
+
+            // Disposing the compressor also flushes the buffers to the inner stream.
+            // We pass leaveOpen: true to the constructor above so that it doesn't close/dispose the inner stream.
+            // Alternatively, we could call compressor.Flush().
+        }
+
+        // Then, decompression
+
+        compressed.Position = 0; // Reset to the beginning of the stream so we can read
+        using var decompressor = new SnappyStream(compressed, CompressionMode.Decompress);
+
+        var buffer = new byte[65536];
+        var bytesRead = decompressor.Read(buffer, 0, buffer.Length);
+        while (bytesRead > 0)
+        {
+            // Do something with the data
+
+            bytesRead = decompressor.Read(buffer, 0, buffer.Length);
+        }
+    }
+}
+```
\ No newline at end of file
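+
+## Asynchronous reads
+
+Since `SnappyStream` is a standard `Stream`, the decompression loop above can also be written asynchronously. A brief sketch, assuming the same `compressed` MemoryStream as in the example above:
+
+```cs
+compressed.Position = 0;
+
+await using var decompressor = new SnappyStream(compressed, CompressionMode.Decompress);
+
+var buffer = new byte[65536];
+int bytesRead;
+while ((bytesRead = await decompressor.ReadAsync(buffer.AsMemory())) > 0)
+{
+    // Do something with the data in buffer[0..bytesRead]
+}
+```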
diff --git a/docs/toc.yml b/docs/toc.yml
new file mode 100644
index 0000000..2cc1ff7
--- /dev/null
+++ b/docs/toc.yml
@@ -0,0 +1,6 @@
+- name: Getting Started
+  href: getting-started.md
+- name: Block Compression
+  href: block.md
+- name: Stream Compression
+  href: stream.md
diff --git a/images/icon-48.png b/images/icon-48.png
new file mode 100644
index 0000000..addb001
Binary files /dev/null and b/images/icon-48.png differ
diff --git a/icon.png b/images/icon.png
similarity index 100%
rename from icon.png
rename to images/icon.png
diff --git a/index.md b/index.md
new file mode 100644
index 0000000..9862954
--- /dev/null
+++ b/index.md
@@ -0,0 +1,27 @@
+---
+_layout: landing
+---
+
+# Snappier
+
+Snappier is a pure C# port of Google's [Snappy](https://github.com/google/snappy) compression algorithm. It is designed with speed as the primary goal, rather than compression ratio, and is ideal for compressing network traffic. Please see [the Snappy README file](https://github.com/google/snappy/blob/master/README.md) for more details on Snappy.
+
+## Project Goals
+
+The Snappier project aims to meet the following needs of the .NET community.
+
+- Cross-platform C# implementation for Linux and Windows, without P/Invoke or special OS installation requirements
+- Compatible with .NET Framework 4.6.1 and later, as well as .NET 6 and later
+- Use .NET paradigms, including asynchronous stream support
+- Full compatibility with both block and stream formats
+- Near C++ level performance
+  - Note: This is only possible on .NET 6 and later with the aid of [Span<T>](https://docs.microsoft.com/en-us/dotnet/api/system.span-1?view=netcore-3.1) and [System.Runtime.Intrinsics](https://fiigii.com/2019/03/03/Hardware-intrinsic-in-NET-Core-3-0-Introduction/).
+  - .NET 4.6.1 is the slowest
+- Keep allocations and garbage collection to a minimum using buffer pools
+
+## Other Projects
+
+There are other projects available for C#/.NET which implement Snappy compression.
+
+- [Snappy.NET](https://snappy.machinezoo.com/) - Uses P/Invoke to C++ for great performance. However, it only works on Windows, and is a bit heap allocation heavy in some cases. It also hasn't been updated since 2014 (as of 10/2020). This project may still be the best choice if your project is on the legacy .NET Framework on Windows, where Snappier is much less performant.
+- [IronSnappy](https://github.com/aloneguid/IronSnappy) - Another pure C# port, based on the Golang implementation instead of the C++ implementation.
diff --git a/toc.yml b/toc.yml
new file mode 100644
index 0000000..47cbb7f
--- /dev/null
+++ b/toc.yml
@@ -0,0 +1,4 @@
+- name: Documentation
+  href: docs/
+- name: API
+  href: api/
\ No newline at end of file