Zero-dependency Python performance tracker. Decorate, time, count — then report. No servers. No config. Just Python.
Watch pytrackio track a real Python pipeline from zero to report.
Copy this, run it, see your first performance report.
from pytrackio import track, timer, counter, report


# 1. Decorate any function — every call is timed and counted.
@track
def fetch_user(user_id: int) -> dict:
    return {"id": user_id, "name": f"User-{user_id}"}


# 2. Time any block of code with a named timer.
with timer("data_processing"):
    users = [fetch_user(i) for i in range(20)]

# 3. Named counters track arbitrary events.
counter("api_calls").increment(20)

# 4. See everything in one report.
report()
Everything pytrackio exposes — no hidden complexity.
Use `@track(name="custom")` for clean metric names. Counters support `.increment(n)`, `.decrement(n)`, `.reset()`, and `.value`. Use `report(colour=False)` for log files — it returns the report as a string for custom logging or storage. Call `.reset()` between test runs. Complete, runnable examples across five domains follow.
from pytrackio import track, timer, counter, report
import time, random


@track
def validate_cart(cart_id: str) -> bool:
    """Simulate cart validation with a small random latency."""
    time.sleep(random.uniform(0.01, 0.05))
    return True


@track(name="payment_gateway")
def charge_card(amount: float, token: str) -> dict:
    """Simulate a card charge; roughly 1 in 20 attempts fails."""
    time.sleep(random.uniform(0.08, 0.20))
    if random.random() < 0.05:  # 5% failure rate
        raise ValueError("Card declined")
    counter("revenue_cents").increment(int(amount * 100))
    return {"status": "captured"}


@track
def send_confirmation_email(email: str) -> None:
    """Simulate an outbound confirmation email."""
    time.sleep(random.uniform(0.02, 0.04))
    counter("emails_sent").increment()


# Run 100 orders end to end; a declined card skips the rest of the order.
for i in range(100):
    with timer("full_order_pipeline"):
        try:
            validate_cart(f"CART-{i:04d}")
            charge_card(49.99, f"tok_{i}")
            send_confirmation_email(f"customer{i}@example.com")
            counter("orders_completed").increment()
        except ValueError:
            counter("orders_failed").increment()

report()
# error_rate on payment_gateway shows ~5%
# avg_ms on full_order_pipeline = end-to-end latency
from pytrackio import track, timer, counter, report
import time, random


@track
def load_dataset(path: str) -> list:
    """Simulate loading a dataset and count the rows ingested."""
    time.sleep(0.12)
    counter("rows_ingested").increment(50_000)
    return list(range(50_000))


@track
def engineer_features(data: list) -> list:
    """Derive normalized features — often the slowest step."""
    time.sleep(0.09)
    return [x / len(data) for x in data]


@track(name="model_training")
def train_model(features: list, labels: list) -> dict:
    """Simulate model training and return evaluation metrics."""
    time.sleep(0.45)
    counter("models_trained").increment()
    return {"accuracy": 0.923, "f1_score": 0.918}


with timer("end_to_end_pipeline"):
    raw = load_dataset("/data/train.csv")
    features = engineer_features(raw)
    # Fix: `labels` was never defined, which raised NameError on the
    # train_model call. Build a placeholder label per feature row.
    labels = [0] * len(features)
    metrics = train_model(features, labels)

report()  # model_training is ~45% of total pipeline time
from fastapi import FastAPI
from pytrackio import track, counter, get_registry

app = FastAPI()


# Use sync helpers — @track doesn't support async def yet.
@track
def _get_user_logic(user_id: int) -> dict:
    counter("user_reads").increment()
    return {"id": user_id, "name": f"User-{user_id}"}


@app.get("/users/{user_id}")
async def get_user(user_id: int):
    return _get_user_logic(user_id)


@app.get("/metrics")
def get_metrics():
    # Expose live metrics — add auth before production!
    registry = get_registry()
    summaries = [
        {
            "name": s.name,
            "calls": s.calls,
            "avg_ms": round(s.avg_ms, 2),
            "error_rate": round(s.error_rate, 2),
        }
        for s in registry.all_summaries()
    ]
    return {
        "uptime_seconds": registry.uptime_seconds(),
        "functions": summaries,
    }


# uvicorn fastapi_app:app --reload
from pytrackio import track, timer, counter, report
import time, random


@track
def load_train_csv(path: str) -> dict:
    """Simulate reading a competition CSV and count its rows."""
    time.sleep(0.08)
    counter("train_rows").increment(10_000)
    return {"shape": (10_000, 47)}


@track
def feature_engineering(train, test) -> tuple:
    """Transform train/test together — often the slowest step."""
    time.sleep(0.15)
    return train, test


@track(name="xgboost_cv")
def cross_validate(data, folds: int = 5) -> float:
    """Simulate k-fold cross-validation and return a noisy CV score."""
    time.sleep(0.6 * folds / 5)
    counter("cv_folds_run").increment(folds)
    return 0.82 + random.uniform(-0.02, 0.02)


BASE = "/kaggle/input/competition"

with timer("full_competition_pipeline"):
    train = load_train_csv(f"{BASE}/train.csv")
    test = load_train_csv(f"{BASE}/test.csv")
    train, test = feature_engineering(train, test)
    score = cross_validate(train, folds=5)
    print(f"CV Score: {score:.4f}")

report()  # Immediately see: feature_engineering + xgboost_cv are bottlenecks
from pytrackio import track, get_registry, counter
import threading, time, random

ERROR_RATE_THRESHOLD = 5.0  # percent
LATENCY_THRESHOLD_MS = 500  # milliseconds


def continuous_monitor() -> None:
    """Poll the registry every 30s and print alerts for hot metrics."""
    registry = get_registry()
    while True:
        for summary in registry.all_summaries():
            if summary.error_rate > ERROR_RATE_THRESHOLD:
                print(f"[CRITICAL] {summary.name}: {summary.error_rate:.1f}% errors")
            if summary.avg_ms > LATENCY_THRESHOLD_MS:
                print(f"[WARNING] {summary.name}: {summary.avg_ms:.0f}ms latency")
        time.sleep(30)


# Start as daemon — dies with the main process.
threading.Thread(target=continuous_monitor, daemon=True).start()


@track
def process_payment(txn_id: str, amount: float) -> bool:
    """Simulated payment that fails ~8% of the time to trip the alert."""
    time.sleep(random.uniform(0.05, 0.6))
    if random.random() < 0.08:  # 8% — will trigger alert
        raise RuntimeError("Payment processor timeout")
    return True
Every technical decision is deliberate.
Choose the right tool for your context.
| Feature | pytrackio | Prometheus | StatsD | Datadog | cProfile |
|---|---|---|---|---|---|
| Setup time | 30 seconds | Hours | 30 min | Hours | 0 sec |
| pip packages needed | 1 | 2+ | 1+ | 3+ | 0 |
| External server | ✓ None | ✗ Yes | ✗ Yes | ✗ Yes | ✓ None |
| Zero dependencies | ✓ | ✗ | ✗ | ✗ | ✓ |
| Error tracking | ✓ | ✓ | Limited | ✓ | ✗ |
| Production ready | ✓ | ✓ | ✓ | ✓ | ✗ |
| Thread safe | ✓ | ✓ | ✓ | ✓ | ✗ |
Pick a scope that matches your experience level. Every PR is welcome.
# 1. Fork & clone
git clone https://github.com/YOUR-USERNAME/pytrackio.git
cd pytrackio && pip install -e . && pip install pytest pytest-cov

# 2. Create a branch & build
git checkout -b feature/async-track-support
# make your changes in pytrackio/ and tests/
python -m pytest tests/ -v   # all must pass

# 3. Submit your PR
git commit -m "feat: add async support for @track decorator"
git push origin feature/async-track-support
# then open a PR at github.com/danshu3007-lang/pytrackio
Built by Deepanshu — Data Analyst & open source developer.
Contributions, issues, and PRs are always welcome.