axum from scratch

just because.

example up and running

cargo new backend

running the example from the repo https://github.com/tokio-rs/axum?tab=readme-ov-file#usage-example

add deps

Cargo.toml
[package]
name = "backend"
version = "0.1.0"
edition = "2021"

[dependencies]
axum = "0.7.5"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.68"
tokio = { version = "1.0", features = ["full"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

src/main.rs
use axum::{
    routing::{get, post},
    http::StatusCode,
    Json, Router,
};
use serde::{Deserialize, Serialize};

#[tokio::main]
async fn main() {
    // initialize tracing
    tracing_subscriber::fmt::init();

    // build our application with a route
    let app = Router::new()
        // `GET /` goes to `root`
        .route("/", get(root))
        // `POST /users` goes to `create_user`
        .route("/users", post(create_user));

    // run our app with hyper, listening globally on port 3000
    let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
    axum::serve(listener, app).await.unwrap();
}

// basic handler that responds with a static string
async fn root() -> &'static str {
    "Hello, World!"
}

async fn create_user(
    // this argument tells axum to parse the request body
    // as JSON into a `CreateUser` type
    Json(payload): Json<CreateUser>,
) -> (StatusCode, Json<User>) {
    // insert your application logic here
    let user = User {
        id: 1337,
        username: payload.username,
    };

    // this will be converted into a JSON response
    // with a status code of `201 Created`
    (StatusCode::CREATED, Json(user))
}

// the input to our `create_user` handler
#[derive(Deserialize)]
struct CreateUser {
    username: String,
}

// the output to our `create_user` handler
#[derive(Serialize)]
struct User {
    id: u64,
    username: String,
}

run it

cargo run

curl it

curl -X POST -H "Content-Type: application/json" -d '{"username":"axum"}' http://localhost:3000/users
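
the handler hard-codes id 1337 and echoes the username back, so the response (a `201 Created`) should look like:

{"id":1337,"username":"axum"}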

watch it instead (re-run on file changes)

cargo install cargo-watch

now start the server with cargo watch (-q quiets cargo-watch's own output, -c clears the screen between runs, -w watches the src/ directory, -x executes `cargo run`)

cargo watch -q -c -w src/ -x run

add a database

docker-compose

we need a local db to connect to. use docker-compose.

add an .env file with secrets for pg.

prefix env vars with the app name to avoid conflicts with the 1000s of other apps you’ve created and didn’t finish on your system ☠️

.env
BACKEND_POSTGRES_DB=backend
BACKEND_POSTGRES_HOST=localhost
BACKEND_POSTGRES_PASSWORD=backend
BACKEND_POSTGRES_USER=backend

don't forget to add the .env file to your .gitignore.
a .gitignore could look like this:

.gitignore
# build artifacts
/target/

# rustfmt backup files
**/*.rs.bk

# local cargo config
.cargo/

# coverage instrumentation output
*.profraw

# note: commit Cargo.lock for binaries like this one;
# only libraries typically ignore it

# IDE/editor files
.idea/
.vscode/
*.code-workspace
*.sublime-project
*.sublime-workspace
*.swp
*~

# logs
*.log

# macOS files
**/.DS_Store
**/.AppleDouble
**/._*
.Trashes
.Spotlight-V100

# environment variable files
.env
# multiple .env files, like .env.development or .env.production
# (gitignore has no trailing comments, so this note sits on its own line)
.env.*
# allow tracking of an example env file
!.env.example

create a docker-compose.yml file

docker-compose.yml
# .env is loaded automatically
services:
  postgres:
    image: postgres:16.2
    container_name: backend-postgres
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - postgres_data_backups:/backups
    environment:
      - POSTGRES_USER=${BACKEND_POSTGRES_USER}
      - POSTGRES_PASSWORD=${BACKEND_POSTGRES_PASSWORD}
      - POSTGRES_DB=${BACKEND_POSTGRES_DB}
    ports:
      - 5432:5432

volumes:
  postgres_data: {}
  postgres_data_backups: {}
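
bring it up and sanity-check the connection (container name and credentials come from the files above):

docker-compose up -d
docker exec -it backend-postgres psql -U backend -d backend -c 'select 1'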

i like to keep a Makefile in the root of my projects for common tasks

Makefile
include .env
export # export all variables from .env file

# the cargo project we created with `cargo new backend`
BACKEND_DIR=backend

ss: start-services
start-services:
	docker-compose up

sb: start-backend
start-backend:
	cd $(BACKEND_DIR) && cargo watch -q -c -w src/ -x run
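
now `make ss` in one terminal brings up postgres, and `make sb` in another starts the server with file watching.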

sqlx

use postgres and sqlx

Cargo.toml
...
sqlx = { version = "0.7", features = ["runtime-tokio-rustls", "any", "postgres"] }

let's introduce AppState to our app
(see the axum crate example: https://docs.rs/axum/latest/axum/index.html#sharing-state-with-handlers)

clean up main.rs and add a database connection

src/main.rs
use axum::{extract::State, http::StatusCode, routing::get, Router};
use sqlx::postgres::{PgPool, PgPoolOptions};
use std::{env, time::Duration};

#[tokio::main]
async fn main() {
    // initialize tracing
    tracing_subscriber::fmt::init();

    let app_state = AppState::new().await;

    // build our application with a route
    let app = Router::new()
        .route("/", get(root))
        .route(
            "/using_connection_pool_extractor",
            get(using_connection_pool_extractor),
        )
        .with_state(app_state);

    // run our app with hyper, listening globally on port 3000
    let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
    axum::serve(listener, app).await.unwrap();
}

// basic handler that responds with a static string
async fn root() -> &'static str {
    "Hello, World!"
}

// we can extract the connection pool with `State`
async fn using_connection_pool_extractor(
    State(app_state): State<AppState>,
) -> Result<String, (StatusCode, String)> {
    sqlx::query_scalar("select 'hello world from pg'")
        .fetch_one(&app_state.db)
        .await
        .map_err(internal_error)
}

#[derive(Clone)]
pub struct AppState {
    pub db: PgPool,
}

impl AppState {
    pub async fn new() -> Self {
        // read env vars
        let pg_user = get_env_var("BACKEND_POSTGRES_USER");
        let pg_password = get_env_var("BACKEND_POSTGRES_PASSWORD");
        let pg_host = get_env_var("BACKEND_POSTGRES_HOST");
        let pg_db = get_env_var("BACKEND_POSTGRES_DB");

        let db_connection_str = format!(
            "postgres://{}:{}@{}/{}",
            pg_user, pg_password, pg_host, pg_db
        );

        let pool = PgPoolOptions::new()
            .max_connections(5)
            .acquire_timeout(Duration::from_secs(3))
            .connect(&db_connection_str)
            .await
            .expect("can't connect to database");

        Self { db: pool }
    }
}

fn get_env_var(key: &str) -> String {
    env::var(key).unwrap_or_else(|_| panic!("{} env var is not set", key))
}

/// Utility function for mapping any error into a `500 Internal Server Error`
/// response.
fn internal_error<E>(err: E) -> (StatusCode, String)
where
    E: std::error::Error,
{
    (StatusCode::INTERNAL_SERVER_ERROR, err.to_string())
}
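
start the server via `make sb` (the app reads the pg env vars that make exports from .env, so a bare `cargo run` won't find them) and hit the new endpoint:

curl http://localhost:3000/using_connection_pool_extractor

it should answer with `hello world from pg`.

if you'd rather have `cargo run` work on its own, one option (my addition, not part of the example) is the dotenvy crate — add `dotenvy = "0.15"` to Cargo.toml and load the file first thing in main:

src/main.rs
#[tokio::main]
async fn main() {
    // hypothetical addition: load .env into the process environment;
    // ignore the error if the file is missing (e.g. in production)
    dotenvy::dotenv().ok();
    // ...rest of main as above
}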

integrating sea-orm

let's add sea-orm to our project, following the sea-orm axum example

update Cargo.toml

Cargo.toml
sea-orm = { version = "0.12", features = ["sqlx-postgres", "runtime-tokio-rustls", "macros"] }
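
sea-orm can wrap the sqlx pool we already have, so AppState can hold a sea-orm DatabaseConnection instead of a raw PgPool. a minimal sketch (the from_pool helper is my own, not from the example):

use sea_orm::{DatabaseConnection, SqlxPostgresConnector};
use sqlx::postgres::PgPool;

#[derive(Clone)]
pub struct AppState {
    pub db: DatabaseConnection,
}

impl AppState {
    // hypothetical helper: reuse the PgPool built in AppState::new earlier
    pub fn from_pool(pool: PgPool) -> Self {
        Self {
            db: SqlxPostgresConnector::from_sqlx_postgres_pool(pool),
        }
    }
}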

migrations

install

cargo install sea-orm-cli

add a set of migration commands to the makefile

Makefile
...
MIGRATIONS_DIR=src/migrations
DATABASE_URL=postgres://$(BACKEND_POSTGRES_USER):$(BACKEND_POSTGRES_PASSWORD)@$(BACKEND_POSTGRES_HOST)/$(BACKEND_POSTGRES_DB)

# migrations
migrate-init:
	sea-orm-cli migrate init -d $(MIGRATIONS_DIR)

# usage: make create-migration name=create_user_table
create-migration:
	sea-orm-cli migrate generate $(name) -d $(MIGRATIONS_DIR)

migrate:
	sea-orm-cli migrate up -d $(MIGRATIONS_DIR) -u $(DATABASE_URL)
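
`make migrate-init` scaffolds a migration crate in $(MIGRATIONS_DIR) (its own Cargo.toml plus a sample migration file); run it once before creating migrations. the `name` variable in create-migration is my own convention, used like `make create-migration name=create_user_table`.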

user table migration

let's create a generic user table corresponding to this (in postgres flavor — `user` is a reserved word so it's quoted, and postgres has no `ON UPDATE CURRENT_TIMESTAMP`, so keeping `updated_at` fresh is left to the app or a trigger):

CREATE TABLE "user" (
    id BIGSERIAL PRIMARY KEY,                                   -- unique identifier for the user
    uuid CHAR(36) NOT NULL UNIQUE,                              -- UUID for external exposure
    username VARCHAR(255) NOT NULL UNIQUE,                      -- username, must be unique
    email VARCHAR(255) NOT NULL UNIQUE,                         -- email address, must be unique
    name VARCHAR(255),                                          -- user's name
    image_url VARCHAR(255),                                     -- URL to the user's profile picture
    created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,  -- when the user was created
    updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP   -- when the user was last updated
);

update the migration file created on init

src/migrations/some_migration.rs
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(User::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(User::Id)
                            .big_integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(User::Uuid)
                            .char_len(36)
                            .not_null()
                            .unique_key(),
                    )
                    .col(
                        ColumnDef::new(User::Username)
                            .string_len(255)
                            .not_null()
                            .unique_key(),
                    )
                    .col(
                        ColumnDef::new(User::Email)
                            .string_len(255)
                            .not_null()
                            .unique_key(),
                    )
                    .col(ColumnDef::new(User::Name).string_len(255))
                    .col(ColumnDef::new(User::ImageUrl).string_len(255))
                    .col(
                        ColumnDef::new(User::CreatedAt)
                            .timestamp_with_time_zone()
                            .default(Expr::current_timestamp())
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(User::UpdatedAt)
                            .timestamp_with_time_zone()
                            .default(Expr::current_timestamp())
                            .not_null(),
                    )
                    .to_owned(),
            )
            .await
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(User::Table).to_owned())
            .await
    }
}

#[derive(Iden)]
pub enum User {
    Table,
    Id,
    Uuid,
    Username,
    Email,
    Name,
    ImageUrl,
    CreatedAt,
    UpdatedAt,
}
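
the `Iden` derive renders each variant in snake_case, so `User::Table` becomes the table name `user` and `User::ImageUrl` the column `image_url`.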

and run the migration

make migrate
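
to double-check what ran, `sea-orm-cli migrate status -d src/migrations` (with DATABASE_URL set, or passed via -u) lists applied and pending migrations.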