Complete Application Examples
This page provides complete, production-ready examples for different application types.
Node.js Express API
A complete REST API with database, caching, and AI integration.
Project Structure
my-api/
├── Dockerfile
├── strongly.manifest.yaml
├── package.json
├── .dockerignore
├── src/
│ ├── server.js
│ ├── routes/
│ │ ├── users.js
│ │ └── health.js
│ ├── services/
│ │ ├── database.js
│ │ └── ai.js
│ └── utils/
│ └── logger.js
Dockerfile
FROM node:18-alpine AS builder
WORKDIR /app

# Copy package files
COPY package*.json ./

# Install production dependencies only.
# "--omit=dev" replaces the deprecated "--only=production" flag (npm 8+).
RUN npm ci --omit=dev

# Copy source code
COPY . .

# Production stage
FROM node:18-alpine
WORKDIR /app

# Copy from the builder stage. The --from flag is required: without it,
# COPY reads the path from the build context, not the previous stage.
COPY --from=builder /app/node_modules ./node_modules
COPY --from=builder /app/src ./src
COPY --from=builder /app/package.json ./

# Create non-root user
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodejs -u 1001
USER nodejs

EXPOSE 3000
CMD ["node", "src/server.js"]
strongly.manifest.yaml
version: "1.0"
type: nodejs
name: user-api
description: REST API for user management with AI features
author: Acme Corp
tags:
  - nodejs
  - api
  - backend

# Container ports to expose through the platform proxy
ports:
  - port: 3000
    name: http
    expose: true

# Environment variables; "secret: true" values are injected securely
env:
  - name: NODE_ENV
    value: "production"
    required: true
  - name: LOG_LEVEL
    value: "info"
    description: Logging level (debug, info, warn, error)
  - name: DATABASE_URL
    value: ""
    required: true
    secret: true
    description: PostgreSQL connection string
  - name: REDIS_URL
    value: ""
    required: true
    secret: true
    description: Redis cache connection string
  - name: JWT_SECRET
    value: ""
    required: true
    secret: true
    description: Secret for JWT token signing

health_check:
  path: /health
  port: 3000
  initial_delay: 15
  period: 30
  timeout: 3

proxy:
  websocket: false
  timeout: 30

resources:
  cpu_request: "100m"
  memory_request: "256Mi"
  cpu_limit: "500m"
  memory_limit: "512Mi"
src/server.js
const express = require('express');
const logger = require('./utils/logger');
const healthRoutes = require('./routes/health');
const userRoutes = require('./routes/users');
const { initDatabase } = require('./services/database');
const { initAI } = require('./services/ai');

const app = express();
const PORT = process.env.PORT || 3000;

// Middleware
app.use(express.json());

// Log every incoming request.
app.use((req, res, next) => {
  logger.info('Request received', {
    method: req.method,
    path: req.path,
    ip: req.ip
  });
  next();
});

// Routes
app.use('/health', healthRoutes);
app.use('/api/users', userRoutes);

// Centralized error handler. Full details are logged server-side; the
// raw err.message is only echoed to clients outside production, since
// it can leak internals (SQL fragments, file paths) to callers.
app.use((err, req, res, next) => {
  logger.error('Error occurred', {
    error: err.message,
    stack: err.stack,
    path: req.path
  });
  res.status(500).json({
    error: 'Internal server error',
    ...(process.env.NODE_ENV !== 'production' && { message: err.message })
  });
});

// Start the server only after required services are ready.
async function start() {
  try {
    await initDatabase();
    await initAI();
    app.listen(PORT, () => {
      logger.info(`Server started on port ${PORT}`);
    });
  } catch (err) {
    logger.error('Failed to start server', { error: err.message });
    // Fail fast so the orchestrator restarts the container.
    process.exit(1);
  }
}

start();
src/services/database.js
const { Pool } = require('pg');
const logger = require('../utils/logger');

let pool = null;

/**
 * Initializes the PostgreSQL connection pool.
 *
 * Resolution order: a platform-provisioned postgres addon described in
 * the STRONGLY_SERVICES JSON env var, then the DATABASE_URL env var.
 * Verifies connectivity with a test query before returning.
 *
 * @throws {Error} if no database configuration is found or the test
 *   connection fails.
 */
async function initDatabase() {
  const services = JSON.parse(process.env.STRONGLY_SERVICES || '{}');

  // Prefer a platform-managed postgres addon when one is attached.
  const pgAddons = services.services?.addons?.postgres || [];

  if (pgAddons.length > 0) {
    const [pgAddon] = pgAddons;
    pool = new Pool({
      connectionString: pgAddon.connection?.connection_string,
      max: 20
    });
  } else if (process.env.DATABASE_URL) {
    // Fallback to environment variable
    pool = new Pool({ connectionString: process.env.DATABASE_URL });
  } else {
    throw new Error('No database configuration found');
  }

  // Test the connection. Release the client in finally so a failed
  // query cannot leak a checked-out connection from the pool.
  const client = await pool.connect();
  try {
    await client.query('SELECT NOW()');
  } finally {
    client.release();
  }
  logger.info('Database connected successfully');
}

/**
 * Returns the initialized pool.
 * @throws {Error} if initDatabase() has not completed.
 */
function getPool() {
  if (!pool) {
    throw new Error('Database not initialized');
  }
  return pool;
}

module.exports = { initDatabase, getPool };
src/services/ai.js
const logger = require('../utils/logger');

let aiGateway = null;

/**
 * Reads AI Gateway configuration from the STRONGLY_SERVICES env var.
 * Logs a warning and leaves the gateway disabled when no models are
 * configured; generateText() will then throw.
 */
async function initAI() {
  const services = JSON.parse(process.env.STRONGLY_SERVICES || '{}');

  // Get AI Gateway configuration
  aiGateway = services.services?.aigateway;

  if (!aiGateway || !aiGateway.available_models?.length) {
    logger.warn('No AI models configured');
    return;
  }

  logger.info('AI Gateway initialized', {
    models: aiGateway.available_models.length,
    base_url: aiGateway.base_url
  });
}

/**
 * Sends a single-turn chat completion request to the first available
 * model and returns the generated text.
 *
 * @param {string} prompt - User prompt to send to the model.
 * @returns {Promise<string>} The model's reply content.
 * @throws {Error} if the gateway is not initialized, the HTTP request
 *   fails, or the response has an unexpected shape.
 */
async function generateText(prompt) {
  if (!aiGateway || !aiGateway.available_models?.length) {
    throw new Error('AI Gateway not initialized');
  }

  const model = aiGateway.available_models[0];

  const response = await fetch(`${aiGateway.base_url}/v1/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'X-Model-Id': model._id
    },
    body: JSON.stringify({
      model: model.vendor_model_id,
      messages: [{ role: 'user', content: prompt }],
      max_tokens: 500
    })
  });

  // Surface HTTP-level failures instead of parsing an error body as a
  // completion.
  if (!response.ok) {
    throw new Error(`AI Gateway request failed: ${response.status} ${response.statusText}`);
  }

  const data = await response.json();
  const content = data.choices?.[0]?.message?.content;
  if (content === undefined) {
    throw new Error('AI Gateway returned an unexpected response shape');
  }
  return content;
}

module.exports = { initAI, generateText };
src/routes/health.js
const express = require('express');
const { getPool } = require('../services/database');

const router = express.Router();

// Health probe: verifies a full database round-trip. Returns 200 when
// healthy, 503 when the database is unreachable.
router.get('/', async (req, res) => {
  try {
    const pool = getPool();
    const client = await pool.connect();
    try {
      await client.query('SELECT 1');
    } finally {
      // Release in finally so a failed probe query cannot leak the
      // checked-out client.
      client.release();
    }
    res.status(200).json({
      status: 'ok',
      timestamp: new Date().toISOString(),
      checks: {
        database: 'ok'
      }
    });
  } catch (err) {
    res.status(503).json({
      status: 'error',
      timestamp: new Date().toISOString(),
      error: err.message
    });
  }
});

module.exports = router;
.dockerignore
node_modules
npm-debug.log
.git
.env
.DS_Store
*.md
coverage
.vscode
.idea
test/
React SPA with Runtime Config
A production-ready React single-page application.
strongly.manifest.yaml
version: "1.0"
type: react
name: admin-dashboard
description: Admin dashboard with runtime configuration
author: Acme Corp
tags:
- react
- frontend
- dashboard
ports:
- port: 3000
name: http
expose: true
static:
root: /usr/share/nginx/html
index: index.html
spa: true
config_file: /usr/share/nginx/html/config.js
config_placeholder: __APP_CONFIG__
build:
args:
NODE_ENV: production
REACT_APP_VERSION: "1.0.0"
output_dir: /app/build
env:
- name: REACT_APP_API_URL
value: "https://api.acme.com"
description: Backend API endpoint
- name: REACT_APP_VERSION
value: "1.0.0"
buildtime: true
description: Application version
health_check:
path: /
port: 3000
initial_delay: 5
period: 30
resources:
cpu_request: "50m"
memory_request: "128Mi"
cpu_limit: "200m"
memory_limit: "256Mi"
src/config.js
// Runtime configuration injected by the platform into window.__APP_CONFIG__
// at container start. The fallback is used during local development,
// where no config is injected.
const localDefaults = {
  REACT_APP_API_URL: 'http://localhost:3000'
};

const config = window.__APP_CONFIG__ || localDefaults;

export const API_URL = config.REACT_APP_API_URL;
export const APP_VERSION = config.REACT_APP_VERSION || '1.0.0';

export default config;
Python Flask API
A Flask API with PostgreSQL and AI integration.
Dockerfile
FROM python:3.11-slim
WORKDIR /app
# Install system dependencies
# NOTE(review): gcc is only needed to compile wheels at pip-install time
# but remains in the final image here; a multi-stage build would shrink
# the image if size matters.
RUN apt-get update && \
apt-get install -y --no-install-recommends \
gcc \
postgresql-client && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Copy requirements first so the dependency layer is cached across
# code-only rebuilds
COPY requirements.txt .
# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
COPY . .
# Create non-root user
RUN useradd -m -u 1001 appuser && \
chown -R appuser:appuser /app
USER appuser
EXPOSE 5000
# Use gunicorn for production
CMD ["gunicorn", "--bind", "0.0.0.0:5000", "--workers", "4", "--timeout", "60", "app:app"]
strongly.manifest.yaml
version: "1.0"
type: flask
name: ml-api
description: Machine learning inference API
author: Data Team
tags:
- python
- flask
- ml
ports:
- port: 5000
name: http
expose: true
env:
- name: FLASK_ENV
value: "production"
required: true
- name: LOG_LEVEL
value: "INFO"
description: Python logging level
- name: DATABASE_URL
value: ""
required: true
secret: true
description: PostgreSQL connection string
health_check:
path: /health
port: 5000
initial_delay: 20
period: 30
timeout: 5
resources:
cpu_request: "200m"
memory_request: "512Mi"
cpu_limit: "1000m"
memory_limit: "1Gi"
app.py
import os
import logging
from flask import Flask, jsonify
from config import Config
from services.database import init_db
from services.ai import init_ai
from routes.health import health_bp
from routes.users import users_bp

# Configure logging
logging.basicConfig(
    level=os.environ.get('LOG_LEVEL', 'INFO'),
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Create Flask app
app = Flask(__name__)
app.config.from_object(Config)

# Lazily initialize services on the first request. Flask 2.3 removed
# before_first_request, so a guarded before_request hook is used instead.
initialized = False


@app.before_request
def initialize_once():
    global initialized
    if not initialized:
        try:
            init_db()
            init_ai()
            logger.info('Services initialized successfully')
        except Exception as e:
            logger.error(f'Failed to initialize services: {e}')
            raise
        # Mark initialized only after both services came up, so a
        # failed attempt is retried on the next request.
        initialized = True


# Register blueprints
app.register_blueprint(health_bp, url_prefix='/health')
app.register_blueprint(users_bp, url_prefix='/api/users')


# Error handlers
@app.errorhandler(404)
def not_found(error):
    return jsonify({'error': 'Not found'}), 404


@app.errorhandler(500)
def internal_error(error):
    logger.error(f'Internal error: {error}')
    return jsonify({'error': 'Internal server error'}), 500


if __name__ == '__main__':
    # Development entry point only; production runs under gunicorn
    # (see Dockerfile CMD).
    app.run(host='0.0.0.0', port=5000)
services/database.py
import os
import json
import psycopg2
from psycopg2 import pool
import logging

logger = logging.getLogger(__name__)

# Module-level pool, created once by init_db().
connection_pool = None


def init_db():
    """Create the PostgreSQL connection pool.

    Resolution order: a platform-provisioned postgres addon described in
    the STRONGLY_SERVICES JSON env var, then the DATABASE_URL env var.

    Raises:
        ValueError: if no usable database configuration is found.
    """
    global connection_pool

    # Parse STRONGLY_SERVICES
    services = json.loads(os.environ.get('STRONGLY_SERVICES', '{}'))

    # Prefer a platform-managed postgres addon when one is attached.
    # .get() chains avoid a KeyError when the addon entry is missing
    # its connection details.
    pg_addons = services.get('services', {}).get('addons', {}).get('postgres', [])
    if pg_addons:
        db_url = pg_addons[0].get('connection', {}).get('connection_string')
    else:
        db_url = os.environ.get('DATABASE_URL')

    if not db_url:
        raise ValueError('No database configuration found')

    # Create connection pool
    connection_pool = pool.SimpleConnectionPool(
        1,   # min connections
        20,  # max connections
        db_url
    )
    logger.info('Database connection pool created')


def get_db():
    """Borrow a connection from the pool; pair with release_db().

    Raises:
        RuntimeError: if init_db() has not been called.
    """
    if not connection_pool:
        raise RuntimeError('Database not initialized')
    return connection_pool.getconn()


def release_db(conn):
    """Return a borrowed connection to the pool."""
    if connection_pool:
        connection_pool.putconn(conn)
Fullstack Application
A fullstack application with React frontend and Node.js backend.
strongly.manifest.yaml
version: "1.0"
type: fullstack
name: my-fullstack-app
description: Fullstack app with React frontend and Node.js backend
author: Acme Corp
tags:
- fullstack
- react
- nodejs
ports:
- port: 3000
name: frontend
expose: true
- port: 8000
name: backend
expose: true
env:
- name: NODE_ENV
value: "production"
required: true
- name: DATABASE_URL
value: ""
required: true
secret: true
description: Database connection string
runtime:
command: ["node", "server.js"]
working_dir: /app
health_check_path: /health
startup_timeout: 60
health_check:
path: /health
port: 3000
initial_delay: 15
period: 30
resources:
cpu_request: "200m"
memory_request: "512Mi"
cpu_limit: "1000m"
memory_limit: "1Gi"
Dockerfile
# Build frontend
FROM node:18-alpine AS frontend-builder
WORKDIR /app/client
COPY client/package*.json ./
RUN npm ci
COPY client/ .
RUN npm run build

# Build backend ("--omit=dev" replaces the deprecated "--only=production")
FROM node:18-alpine AS backend-builder
WORKDIR /app
COPY package*.json ./
RUN npm ci --omit=dev
COPY . .

# Production stage
FROM node:18-alpine
WORKDIR /app

# The --from flag is required to copy out of the named build stages;
# without it COPY reads the paths from the build context instead.
COPY --from=backend-builder /app/node_modules ./node_modules
COPY --from=backend-builder /app/server.js ./
COPY --from=frontend-builder /app/client/dist ./client/dist

RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodejs -u 1001
USER nodejs

EXPOSE 3000
CMD ["node", "server.js"]
R Shiny Dashboard
An interactive R Shiny dashboard application.
strongly.manifest.yaml
version: "1.0"
type: rshiny
name: analytics-dashboard
description: Interactive analytics dashboard built with R Shiny
author: Data Team
tags:
- rshiny
- analytics
- dashboard
ports:
- port: 3838
name: http
expose: true
env:
- name: SHINY_LOG_LEVEL
value: "INFO"
description: Shiny server log level
- name: DATA_SOURCE_URL
value: ""
required: true
secret: true
description: Database connection for analytics data
runtime:
working_dir: /app
health_check_path: /
health_check:
path: /
port: 3838
initial_delay: 20
period: 30
timeout: 5
resources:
cpu_request: "200m"
memory_request: "512Mi"
cpu_limit: "1000m"
memory_limit: "1Gi"
Dockerfile
FROM rocker/shiny:4.3
WORKDIR /app
# Install R packages
COPY install_packages.R .
RUN Rscript install_packages.R
# Copy the Shiny app into the directory the server serves from
# NOTE(review): WORKDIR /app is only used for the package install step;
# the app itself lives under /srv/shiny-server -- confirm intentional.
COPY app/ /srv/shiny-server/app/
# Configure Shiny Server
COPY shiny-server.conf /etc/shiny-server/shiny-server.conf
EXPOSE 3838
CMD ["/usr/bin/shiny-server"]
MCP Server
A Model Context Protocol server providing custom tools.
strongly.manifest.yaml
version: "1.0"
type: mcp_server
name: data-tools-mcp
description: MCP server providing data processing and analysis tools
author: Platform Team
homepage: https://github.com/acme/data-tools-mcp
license: MIT
tags:
- mcp
- tools
- data-processing
ports:
- port: 8080
name: http
expose: true
env:
- name: LOG_LEVEL
value: "info"
description: Logging level
- name: MAX_CONCURRENT_REQUESTS
value: "10"
description: Maximum concurrent tool executions
runtime:
command: ["python", "server.py"]
working_dir: /app
health_check_path: /health
startup_timeout: 60
health_check:
path: /health
port: 8080
initial_delay: 10
period: 30
resources:
cpu_request: "100m"
memory_request: "256Mi"
cpu_limit: "500m"
memory_limit: "512Mi"
mcp:
protocol_version: "1.0"
transport: http
capabilities:
- tools
- prompts
authentication_required: true
tools:
- name: analyze_csv
description: Analyzes a CSV file and provides summary statistics
parameters:
type: object
properties:
file_url:
type: string
description: URL of the CSV file to analyze
columns:
type: array
items:
type: string
description: Specific columns to analyze (optional)
required: [file_url]
- name: transform_data
description: Transforms data using specified operations
parameters:
type: object
properties:
input:
type: string
description: Input data as JSON string
operations:
type: array
items:
type: string
description: List of transformation operations
required: [input, operations]
Dockerfile
FROM python:3.11-slim
WORKDIR /app
# Copy and install dependencies first so the layer is cached across
# code-only rebuilds
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
COPY . .
# Run as a non-root user
RUN useradd -m -u 1001 appuser && \
chown -R appuser:appuser /app
USER appuser
EXPOSE 8080
CMD ["python", "server.py"]
Deployment Commands
For all examples above:
# 1. Create project archive
tar -czf app.tar.gz .
# 2. Upload via platform UI
# Navigate to Apps -> Deploy App
# Upload app.tar.gz
# 3. Configure environment
# Select resource tier
# Connect services (databases, AI models)
# Set environment variables
# 4. Deploy
# Click "Deploy" button
# Monitor build progress (20 minute timeout)
# 5. View application
# Click "View App" once deployed