Deploying AI into Production with FastAPI
Matt Eckerle
Software and Data Engineering Leader
from fastapi import FastAPI
import logging

# Attach to uvicorn's error logger so records go through uvicorn's handlers.
logger = logging.getLogger('uvicorn.error')

app = FastAPI()
logger.info("App is running!")


@app.get('/')
async def main():
    """Root endpoint: log the hit at DEBUG level and return 'ok'."""
    logger.debug('GET /')
    return 'ok'
from fastapi import FastAPI
import logging
import joblib

logger = logging.getLogger('uvicorn.error')

# Deserialize the trained classifier once, at import time, so it is ready
# before the app starts serving requests.
model = joblib.load('penguin_classifier.pkl')
logger.info("Penguin classifier loaded successfully.")

app = FastAPI()
from fastapi import FastAPI, Request
import logging
import time

logger = logging.getLogger('uvicorn.error')

app = FastAPI()


@app.middleware("http")
async def log_process_time(request: Request, call_next):
    """HTTP middleware: time each request and log the elapsed seconds.

    `call_next` forwards the request to the downstream handler; its
    response is returned unchanged, so the middleware is transparent
    to clients.
    """
    start_time = time.perf_counter()
    response = await call_next(request)
    process_time = time.perf_counter() - start_time
    # Lazy %-style args (not an f-string) defer string formatting until the
    # record is actually emitted — the logging-module best practice.
    logger.info("Process time was %s seconds.", process_time)
    return response
# Run the app with uvicorn; --log-level debug surfaces DEBUG-level records
# (e.g. logger.debug calls) in the server output.
uvicorn main:app --log-level debug
from fastapi import FastAPI

app = FastAPI()


@app.get("/health")
async def get_health():
    """Liveness probe: report that the service is up."""
    return {"status": "OK"}
from fastapi import FastAPI
import joblib

# Load the serialized classifier at startup so /health can inspect it.
model = joblib.load('penguin_classifier.pkl')

app = FastAPI()


@app.get("/health")
async def get_health():
    """Health check that also reports the loaded model's hyperparameters."""
    params = model.get_params()
    return {"status": "OK", "params": params}
Deploying AI into Production with FastAPI