axum from scratch
just because.
example up and running
cargo new backend
running the example from the repo https://github.com/tokio-rs/axum?tab=readme-ov-file#usage-example
add deps
[package]
name = "backend"
version = "0.1.0"
edition = "2021"

[dependencies]
axum = "0.7.5"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.68"
tokio = { version = "1.0", features = ["full"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
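the versions above are pinned; if you'd rather pull whatever is current, cargo add can manage the same deps with the same feature flags:

cargo add axum
cargo add serde --features derive
cargo add serde_json
cargo add tokio --features full
cargo add tracing
cargo add tracing-subscriber --features env-filter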
then drop the example into src/main.rs:
use axum::{
    routing::{get, post},
    http::StatusCode,
    Json, Router,
};
use serde::{Deserialize, Serialize};

#[tokio::main]
async fn main() {
    // initialize tracing
    tracing_subscriber::fmt::init();

    // build our application with a route
    let app = Router::new()
        // `GET /` goes to `root`
        .route("/", get(root))
        // `POST /users` goes to `create_user`
        .route("/users", post(create_user));

    // run our app with hyper, listening globally on port 3000
    let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
    axum::serve(listener, app).await.unwrap();
}

// basic handler that responds with a static string
async fn root() -> &'static str {
    "Hello, World!"
}

async fn create_user(
    // this argument tells axum to parse the request body
    // as JSON into a `CreateUser` type
    Json(payload): Json<CreateUser>,
) -> (StatusCode, Json<User>) {
    // insert your application logic here
    let user = User {
        id: 1337,
        username: payload.username,
    };

    // this will be converted into a JSON response
    // with a status code of `201 Created`
    (StatusCode::CREATED, Json(user))
}

// the input to our `create_user` handler
#[derive(Deserialize)]
struct CreateUser {
    username: String,
}

// the output to our `create_user` handler
#[derive(Serialize)]
struct User {
    id: u64,
    username: String,
}
run it
cargo run
curl it
curl -X POST -H "Content-Type: application/json" -d '{"username":"axum"}' http://localhost:3000/users
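create_user hard-codes id 1337 and echoes the username back, so the response should look like:

{"id":1337,"username":"axum"}

and a plain curl http://localhost:3000/ should answer with Hello, World!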
watch it instead (re-run on file changes)
cargo install cargo-watch
now start the server with cargo watch
-q : quiet mode, reduces output verbosity
-c : clears the screen before each run
-w src/ : watches the src/ directory for changes
-x run : executes cargo run on detected changes
cargo watch -q -c -w src/ -x run
add a database
docker-compose
we need a local db to connect to. use docker-compose.
add an .env file with secrets for pg.
prefix env vars with the app name to avoid conflicts with the 1000s of other apps you’ve created and didn’t finish on your system ☠️
BACKEND_POSTGRES_DB=backend
BACKEND_POSTGRES_HOST=localhost
BACKEND_POSTGRES_PASSWORD=backend
BACKEND_POSTGRES_USER=backend
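the Makefile further down exports these into the environment for us. if you'd rather have the app load .env itself, a minimal sketch using the dotenvy crate (assuming you add dotenvy to [dependencies]):

// sketch: load .env at startup so std::env::var() sees the values
fn main() {
    // .ok(): a missing .env (e.g. in prod) shouldn't be fatal
    dotenvy::dotenv().ok();
}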
don't forget to add the .env file to your .gitignore
.gitignore could look like this:
# Build artifacts, binaries, and generated docs
/target/

# rustfmt backup files
*.rs.bk

# If you're using Cargo to manage dependencies, you can exclude local config like so:
.cargo/
.cargo/config

# If you use rustup to manage your Rust installation, you can exclude toolchains and targets.
/rustup/toolchains/
/rustup/targets/

# LLVM coverage profiling output
*.profraw

# Specific IDE/editor configurations and temporary files
# IntelliJ IDEA
.idea/
# CLion
.clion/
# VS Code
.vscode/
*.code-workspace

# Other editors
*.sublime-project
*.sublime-workspace

# Miscellaneous editor files
*.swp
*~

# Logs
*.log

# Libraries usually ignore Cargo.lock; binary crates often commit it instead
/Cargo.lock

# macOS specific files
.DS_Store
**/.AppleDouble
**/._*
._.Trashes
._.Spotlight-V100

# Environment variable files (e.g. .env.development, .env.production)
.env
.env.*

# Allow tracking of the example env file
!.env.example
create a docker-compose.yml file
# .env is loaded automatically
services:
  postgres:
    image: postgres:16.2
    container_name: backend-postgres
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - postgres_data_backups:/backups
    environment:
      - POSTGRES_USER=${BACKEND_POSTGRES_USER}
      - POSTGRES_PASSWORD=${BACKEND_POSTGRES_PASSWORD}
      - POSTGRES_DB=${BACKEND_POSTGRES_DB}
    ports:
      - 5432:5432

volumes:
  postgres_data: {}
  postgres_data_backups: {}
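before wiring up the app, it's worth a sanity check that the container answers. psql ships inside the postgres image, so something like this should print 1:

docker-compose up -d
docker-compose exec postgres psql -U backend -d backend -c 'select 1'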
i like to keep a Makefile in the root of my projects for common tasks
include .env
export # export all variables from .env file

ss: start-services
start-services:
	docker-compose up

sb: start-backend
start-backend:
	cd $(BACKEND_DIR) && cargo watch -q -c -w src/ -x run
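none of these targets produce files, so marking them phony avoids surprises if a file named ss or sb ever shows up:

.PHONY: ss start-services sb start-backend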
sqlx
use postgres and sqlx
...
sqlx = { version = "0.7", features = ["runtime-tokio-rustls", "any", "postgres"] }
let's introduce AppState to our app:
(see the axum crate example: https://docs.rs/axum/latest/axum/index.html#sharing-state-with-handlers)
clean up main.rs and add a database connection
use axum::{extract::State, http::StatusCode, routing::get, Router};use sqlx::postgres::{PgPool, PgPoolOptions};use std::{env, time::Duration};
#[tokio::main]async fn main() { // initialize tracing tracing_subscriber::fmt::init();
let app_state = AppState::new().await;
// build our application with a route let app = Router::new() .route("/", get(root)) .route( "/using_connection_pool_extractor", get(using_connection_pool_extractor), ) .with_state(app_state);
// run our app with hyper, listening globally on port 3000 let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); axum::serve(listener, app).await.unwrap();}
// basic handler that responds with a static stringasync fn root() -> &'static str { "Hello, World!"}
// we can extract the connection pool with `State`async fn using_connection_pool_extractor( State(app_state): State<AppState>,) -> Result<String, (StatusCode, String)> { sqlx::query_scalar("select 'hello world from pg'") .fetch_one(&app_state.db) .await .map_err(internal_error)}
#[derive(Clone)]pub struct AppState { pub db: PgPool,}
impl AppState { pub async fn new() -> Self { // read env vars let pg_user = get_env_var("BACKEND_POSTGRES_USER"); let pg_password = get_env_var("BACKEND_POSTGRES_PASSWORD"); let pg_host = get_env_var("BACKEND_POSTGRES_HOST"); let pg_db = get_env_var("BACKEND_POSTGRES_DB");
let db_connection_str = format!( "postgres://{}:{}@{}/{}", pg_user, pg_password, pg_host, pg_db );
let pool = PgPoolOptions::new() .max_connections(5) .acquire_timeout(Duration::from_secs(3)) .connect(&db_connection_str) .await .expect("can't connect to database");
Self { db: pool } }}
fn get_env_var(key: &str) -> String { env::var(key).unwrap_or_else(|_| panic!("{} env var is not set", key))}
/// Utility function for mapping any error into a `500 Internal Server Error`/// response.fn internal_error<E>(err: E) -> (StatusCode, String)where E: std::error::Error,{ (StatusCode::INTERNAL_SERVER_ERROR, err.to_string())}
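restart the server and hit the new route; the scalar query means the literal row comes straight back from pg:

curl http://localhost:3000/using_connection_pool_extractor
# -> hello world from pg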
integrating sea orm
let's add sea-orm to our project, following the sea-orm axum example
update Cargo.toml
sea-orm = { version = "0.12", features = ["sqlx-postgres", "runtime-tokio-rustls", "macros"] }
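the dependency alone doesn't give us a SeaORM handle. one option (a sketch, not wired into AppState yet) is to wrap the sqlx pool we already have instead of opening a second connection:

// sketch: reuse the existing PgPool as a SeaORM DatabaseConnection
use sea_orm::{DatabaseConnection, SqlxPostgresConnector};
use sqlx::PgPool;

fn sea_orm_connection(pool: PgPool) -> DatabaseConnection {
    SqlxPostgresConnector::from_sqlx_postgres_pool(pool)
}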
migrations
install
cargo install sea-orm-cli
add a set of migration commands to the makefile
...
MIGRATIONS_DIR=src/migrations

# migrations
migrate-init:
	sea-orm-cli migrate init -d $(MIGRATIONS_DIR)

# usage: make create-migration name=create_user_table
create-migration:
	sea-orm-cli migrate generate $(name) -d $(MIGRATIONS_DIR)

migrate:
	sea-orm-cli migrate -d $(MIGRATIONS_DIR)
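one gotcha: sea-orm-cli reads the connection string from DATABASE_URL, which our .env doesn't define. since the Makefile already exports the pg vars, one way is to compose it right there:

DATABASE_URL=postgres://$(BACKEND_POSTGRES_USER):$(BACKEND_POSTGRES_PASSWORD)@$(BACKEND_POSTGRES_HOST)/$(BACKEND_POSTGRES_DB)
export DATABASE_URL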
user table migration
let’s create a generic user table corresponding to this:
CREATE TABLE "user" (
    id BIGSERIAL PRIMARY KEY,                                   -- unique identifier for the user
    uuid CHAR(36) NOT NULL UNIQUE,                              -- UUID for external exposure
    username VARCHAR(255) NOT NULL UNIQUE,                      -- username, must be unique
    email VARCHAR(255) NOT NULL UNIQUE,                         -- email address, must be unique
    name VARCHAR(255),                                          -- user's name
    image_url VARCHAR(255),                                     -- URL to the user's profile picture
    created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,  -- when the user was created
    updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP   -- when the user was last updated
);
update the migration file created on init
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(User::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(User::Id)
                            .big_integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(User::Uuid)
                            .char_len(36)
                            .not_null()
                            .unique_key(),
                    )
                    .col(
                        ColumnDef::new(User::Username)
                            .string_len(255)
                            .not_null()
                            .unique_key(),
                    )
                    .col(
                        ColumnDef::new(User::Email)
                            .string_len(255)
                            .not_null()
                            .unique_key(),
                    )
                    .col(ColumnDef::new(User::Name).string_len(255))
                    .col(ColumnDef::new(User::ImageUrl).string_len(255))
                    .col(
                        ColumnDef::new(User::CreatedAt)
                            .timestamp_with_time_zone()
                            .default(Expr::current_timestamp())
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(User::UpdatedAt)
                            .timestamp_with_time_zone()
                            .default(Expr::current_timestamp())
                            .not_null(),
                    )
                    .to_owned(),
            )
            .await
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(User::Table).to_owned())
            .await
    }
}

#[derive(Iden)]
pub enum User {
    Table,
    Id,
    Uuid,
    Username,
    Email,
    Name,
    ImageUrl,
    CreatedAt,
    UpdatedAt,
}
and run the migration
make migrate
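to double-check the table landed, sea-orm-cli can report migration status:

sea-orm-cli migrate status -d src/migrations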