Add S2I support and clean up unused files
- Add complete Source-to-Image (S2I) deployment support
- Create .s2i/ directory with assemble/run scripts and environment config
- Add openshift-s2i.yaml template for S2I deployment
- Add scripts/deploy-s2i.sh for automated S2I deployment
- Add README-S2I.md with comprehensive S2I documentation
- Update README.md and AIAgents-Support.md with S2I information
- Clean up unused files: Dockerfile.simple, HTML backups, daemonset files
- Remove unused Makefile and openshift-git-deploy.yaml
- Update kustomization.yaml to use deployment instead of daemonset
- Update undeploy-complete.sh to remove deployment instead of daemonset
- Maintain a clean and organized codebase structure
.s2i/bin/assemble (new executable file, 40 lines)
@@ -0,0 +1,40 @@
#!/bin/bash
# S2I Assemble Script for ORU Analyzer
# This script is called during the S2I build process

set -e

echo "=== ORU Analyzer S2I Assemble Script ==="
echo "Building ORU Analyzer from source..."

# Install Python dependencies
echo "Installing Python dependencies..."
pip install --no-cache-dir -r requirements.txt

# Create application directory structure
echo "Creating application directory structure..."
mkdir -p /opt/app-root/src/app/static
mkdir -p /opt/app-root/src/app/templates
mkdir -p /opt/app-root/src/logs

# Copy application files
echo "Copying application files..."
cp -r app/* /opt/app-root/src/app/

# Set proper permissions
echo "Setting permissions..."
chmod +x /opt/app-root/src/app/main.py
chmod -R 755 /opt/app-root/src/app/static

# Create startup script
echo "Creating startup script..."
cat > /opt/app-root/src/start.sh << 'EOF'
#!/bin/bash
echo "Starting ORU Analyzer..."
cd /opt/app-root/src
exec python -m uvicorn app.main:app --host 0.0.0.0 --port 8080 --workers 1
EOF

chmod +x /opt/app-root/src/start.sh

echo "=== S2I Assemble completed successfully ==="
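The assemble/run pair above can be exercised outside the cluster before pushing. A minimal local check, assuming the standalone `s2i` CLI is installed and that a UBI Python 3.11 builder image such as `registry.access.redhat.com/ubi8/python-311` is used (an assumption; this commit does not pin the builder):

```bash
# Build a test image from the current checkout using the .s2i assemble/run scripts
s2i build . registry.access.redhat.com/ubi8/python-311 oru-analyzer:s2i-test

# Run it locally and hit the health endpoint configured in .s2i/environment
podman run -d --name oru-s2i-test -p 8080:8080 oru-analyzer:s2i-test
sleep 10
curl -f http://localhost:8080/health
podman rm -f oru-s2i-test
```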
.s2i/bin/run (new executable file, 19 lines)
@@ -0,0 +1,19 @@
#!/bin/bash
# S2I Run Script for ORU Analyzer
# This script is called when the container starts

set -e

echo "=== ORU Analyzer S2I Run Script ==="
echo "Starting ORU Analyzer application..."

# Change to application directory
cd /opt/app-root/src

# Set environment variables
export PYTHONPATH=/opt/app-root/src
export PYTHONUNBUFFERED=1

# Start the application
echo "Launching ORU Analyzer..."
exec /opt/app-root/src/start.sh
.s2i/environment (new file, 35 lines)
@@ -0,0 +1,35 @@
# S2I Environment Configuration for ORU Analyzer
# OpenShift Source-to-Image configuration

# Python Configuration
PYTHON_VERSION=3.11
PIP_INDEX_URL=https://pypi.org/simple

# Application Configuration
APP_NAME=oru-analyzer
APP_VERSION=2.0.0

# FastAPI Configuration
HOST=0.0.0.0
PORT=8080
WORKERS=1

# OpenShift Specific
OPENSHIFT_BUILD_NAME=oru-analyzer
OPENSHIFT_BUILD_NAMESPACE=resource-governance

# Resource Configuration
CPU_REQUEST=100m
CPU_LIMIT=500m
MEMORY_REQUEST=256Mi
MEMORY_LIMIT=1Gi

# Health Check Configuration
HEALTH_CHECK_PATH=/health
HEALTH_CHECK_INTERVAL=30s
HEALTH_CHECK_TIMEOUT=10s
HEALTH_CHECK_RETRIES=3

# Logging Configuration
LOG_LEVEL=INFO
LOG_FORMAT=%(asctime)s - %(name)s - %(levelname)s - %(message)s
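Values in `.s2i/environment` are baked in at build time; they can also be overridden on the OpenShift objects after deployment. A sketch with `oc set env`, assuming the build and deployment objects are named `oru-analyzer` as in the S2I template (names are assumptions, not confirmed by this file):

```bash
# Override a build-time variable and trigger a rebuild
oc set env bc/oru-analyzer PIP_INDEX_URL=https://pypi.org/simple
oc start-build oru-analyzer

# Override runtime variables on the deployed application
oc set env deploymentconfig/oru-analyzer LOG_LEVEL=DEBUG WORKERS=2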
Dockerfile.simple (removed, 31 lines)
@@ -1,31 +0,0 @@
FROM python:3.11-slim

# Install system dependencies
RUN apt-get update && apt-get install -y \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Create non-root user
RUN groupadd -r appuser && useradd -r -g appuser appuser

# Create directories
RUN mkdir -p /app /tmp/reports && \
    chown -R appuser:appuser /app /tmp/reports

# Install Python dependencies
COPY requirements.txt /app/
WORKDIR /app
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY app/ ./app/
RUN chown -R appuser:appuser /app

# Switch to non-root user
USER appuser

# Expose port
EXPOSE 8080

# Command to run the application
CMD ["python", "-m", "uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8080"]
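With Dockerfile.simple removed, the container-build path presumably goes through the repository's remaining Dockerfile and scripts/build-and-push.sh. A rough local equivalent of what the deleted file provided, with an illustrative tag (sketch only):

```bash
# Build and smoke-test the image locally before pushing
podman build -t andersonid/resource-governance:dev .
podman run -d --name rg-dev -p 8080:8080 andersonid/resource-governance:dev
sleep 10
curl -f http://localhost:8080/health
podman rm -f rg-dev
```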
Makefile (removed, 139 lines)
@@ -1,139 +0,0 @@
# Makefile for the OpenShift Resource Governance Tool

# Configuration
IMAGE_NAME = resource-governance
TAG = latest
REGISTRY = andersonid
FULL_IMAGE_NAME = $(REGISTRY)/$(IMAGE_NAME):$(TAG)
NAMESPACE = resource-governance

# Colors for output
RED = \033[0;31m
GREEN = \033[0;32m
YELLOW = \033[1;33m
BLUE = \033[0;34m
NC = \033[0m # No Color

.PHONY: help build test deploy undeploy clean dev logs status

help: ## Show help
	@echo "$(BLUE)OpenShift Resource Governance Tool$(NC)"
	@echo ""
	@echo "Available commands:"
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "  $(GREEN)%-15s$(NC) %s\n", $$1, $$2}'

build: ## Build the image with Podman
	@echo "$(YELLOW)📦 Building container image with Podman...$(NC)"
	@./scripts/build.sh $(TAG) $(REGISTRY)

test: ## Test the application
	@echo "$(YELLOW)🧪 Testing application...$(NC)"
	@python -c "import app.main; print('$(GREEN)✅ App imports successfully$(NC)')"
	@echo "$(YELLOW)🧪 Testing API...$(NC)"
	@python -m uvicorn app.main:app --host 0.0.0.0 --port 8080 &
	@sleep 5
	@curl -f http://localhost:8080/health || (echo "$(RED)❌ Health check failed$(NC)" && exit 1)
	@pkill -f uvicorn
	@echo "$(GREEN)✅ Tests passed$(NC)"

deploy: ## Deploy to OpenShift
	@echo "$(YELLOW)🚀 Deploying to OpenShift...$(NC)"
	@./scripts/deploy.sh $(TAG) $(REGISTRY)

undeploy: ## Remove from OpenShift
	@echo "$(YELLOW)🗑️ Undeploying from OpenShift...$(NC)"
	@./scripts/undeploy.sh

clean: ## Clean up local resources
	@echo "$(YELLOW)🧹 Cleaning up...$(NC)"
	@docker rmi $(FULL_IMAGE_NAME) 2>/dev/null || true
	@docker system prune -f
	@echo "$(GREEN)✅ Cleanup completed$(NC)"

dev: ## Run in development mode
	@echo "$(YELLOW)🔧 Starting development server...$(NC)"
	@python -m uvicorn app.main:app --reload --host 0.0.0.0 --port 8080

logs: ## View application logs
	@echo "$(YELLOW)📋 Showing application logs...$(NC)"
	@oc logs -f daemonset/$(IMAGE_NAME) -n $(NAMESPACE)

status: ## View application status
	@echo "$(YELLOW)📊 Application status:$(NC)"
	@oc get all -n $(NAMESPACE)
	@echo ""
	@echo "$(YELLOW)🌐 Route URL:$(NC)"
	@oc get route $(IMAGE_NAME)-route -n $(NAMESPACE) -o jsonpath='{.spec.host}' 2>/dev/null || echo "Route not found"

install-deps: ## Install Python dependencies
	@echo "$(YELLOW)📦 Installing Python dependencies...$(NC)"
	@pip install -r requirements.txt
	@echo "$(GREEN)✅ Dependencies installed$(NC)"

format: ## Format Python code
	@echo "$(YELLOW)🎨 Formatting Python code...$(NC)"
	@python -m black app/
	@python -m isort app/
	@echo "$(GREEN)✅ Code formatted$(NC)"

lint: ## Lint Python code
	@echo "$(YELLOW)🔍 Linting Python code...$(NC)"
	@python -m flake8 app/
	@python -m mypy app/
	@echo "$(GREEN)✅ Linting completed$(NC)"

security: ## Run security checks
	@echo "$(YELLOW)🔒 Security check...$(NC)"
	@python -m bandit -r app/
	@echo "$(GREEN)✅ Security check completed$(NC)"

all: clean install-deps format lint test build ## Run the full pipeline

# OpenShift-specific commands
oc-login: ## Log in to OpenShift
	@echo "$(YELLOW)🔐 Logging into OpenShift...$(NC)"
	@oc login

oc-projects: ## List OpenShift projects
	@echo "$(YELLOW)📋 OpenShift projects:$(NC)"
	@oc get projects

oc-ns: ## Create the namespace
	@echo "$(YELLOW)📁 Creating namespace...$(NC)"
	@oc apply -f k8s/namespace.yaml

oc-rbac: ## Apply RBAC
	@echo "$(YELLOW)🔐 Applying RBAC...$(NC)"
	@oc apply -f k8s/rbac.yaml

oc-config: ## Apply the ConfigMap
	@echo "$(YELLOW)⚙️ Applying ConfigMap...$(NC)"
	@oc apply -f k8s/configmap.yaml

oc-deploy: ## Apply the DaemonSet
	@echo "$(YELLOW)📦 Applying DaemonSet...$(NC)"
	@oc apply -f k8s/daemonset.yaml

oc-service: ## Apply the Service
	@echo "$(YELLOW)🌐 Applying Service...$(NC)"
	@oc apply -f k8s/service.yaml

oc-route: ## Apply the Route
	@echo "$(YELLOW)🛣️ Applying Route...$(NC)"
	@oc apply -f k8s/route.yaml

oc-apply: oc-ns oc-rbac oc-config oc-deploy oc-service oc-route ## Apply all resources

# Monitoring commands
monitor: ## Monitor the application
	@echo "$(YELLOW)📊 Monitoring application...$(NC)"
	@watch -n 5 'oc get pods -n $(NAMESPACE) && echo "" && oc get route $(IMAGE_NAME)-route -n $(NAMESPACE)'

health: ## Check application health
	@echo "$(YELLOW)🏥 Health check...$(NC)"
	@ROUTE_URL=$$(oc get route $(IMAGE_NAME)-route -n $(NAMESPACE) -o jsonpath='{.spec.host}' 2>/dev/null); \
	if [ -n "$$ROUTE_URL" ]; then \
		curl -f https://$$ROUTE_URL/health || echo "$(RED)❌ Health check failed$(NC)"; \
	else \
		echo "$(RED)❌ Route not found$(NC)"; \
	fi
README-S2I.md (new file, 295 lines)
@@ -0,0 +1,295 @@
# ORU Analyzer - Source-to-Image (S2I) Deployment

This document describes how to deploy ORU Analyzer using OpenShift Source-to-Image (S2I) as an alternative to container-based deployment.

## 🚀 S2I vs Container Build

### Container Build (Current)
- Uses Dockerfile + Quay.io + GitHub Actions
- Manual build and push process
- More control over build process

### Source-to-Image (S2I)
- Direct deployment from Git repository
- OpenShift manages build and deployment
- Simpler deployment process
- Automatic rebuilds on code changes

## 📋 Prerequisites

- OpenShift 4.x cluster
- OpenShift CLI (oc) installed and configured
- Access to the cluster with appropriate permissions
- Git repository access

## 🛠️ S2I Deployment Methods

### Method 1: Using S2I Template (Recommended)

```bash
# 1. Login to OpenShift
oc login <cluster-url>

# 2. Deploy using S2I template
./scripts/deploy-s2i.sh
```

### Method 2: Using oc new-app

```bash
# 1. Create namespace
oc new-project resource-governance

# 2. Deploy using oc new-app
oc new-app python:3.11~https://github.com/andersonid/openshift-resource-governance.git \
  --name=oru-analyzer \
  --env=PYTHON_VERSION=3.11 \
  --env=HOST=0.0.0.0 \
  --env=PORT=8080

# 3. Expose the application
oc expose service oru-analyzer

# 4. Get the route
oc get route oru-analyzer
```

### Method 3: Using Template File

```bash
# 1. Process and apply template
oc process -f openshift-s2i.yaml \
  -p NAME=oru-analyzer \
  -p NAMESPACE=resource-governance \
  -p GIT_REPOSITORY=https://github.com/andersonid/openshift-resource-governance.git \
  -p GIT_REF=main \
  -p PYTHON_VERSION=3.11 \
  | oc apply -f -

# 2. Wait for deployment
oc rollout status deploymentconfig/oru-analyzer
```

## 📁 S2I File Structure

```
├── .s2i/
│   ├── environment          # S2I environment variables
│   └── bin/
│       ├── assemble         # Build script
│       └── run              # Runtime script
├── openshift-s2i.yaml       # OpenShift S2I template
├── scripts/
│   └── deploy-s2i.sh        # S2I deployment script
└── README-S2I.md            # This file
```

## ⚙️ Configuration

### Environment Variables

The S2I configuration supports the following environment variables:

```bash
# Python Configuration
PYTHON_VERSION=3.11
PIP_INDEX_URL=https://pypi.org/simple

# Application Configuration
APP_NAME=oru-analyzer
HOST=0.0.0.0
PORT=8080
WORKERS=1

# Resource Configuration
CPU_REQUEST=100m
CPU_LIMIT=500m
MEMORY_REQUEST=256Mi
MEMORY_LIMIT=1Gi

# Health Check Configuration
HEALTH_CHECK_PATH=/health
HEALTH_CHECK_INTERVAL=30s
HEALTH_CHECK_TIMEOUT=10s
HEALTH_CHECK_RETRIES=3
```

### Template Parameters

The OpenShift template supports the following parameters:

- `NAME`: Application name (default: oru-analyzer)
- `NAMESPACE`: OpenShift namespace (default: resource-governance)
- `GIT_REPOSITORY`: Git repository URL
- `GIT_REF`: Git reference/branch (default: main)
- `PYTHON_VERSION`: Python version (default: 3.11)
- `CPU_REQUEST`: CPU request (default: 100m)
- `CPU_LIMIT`: CPU limit (default: 500m)
- `MEMORY_REQUEST`: Memory request (default: 256Mi)
- `MEMORY_LIMIT`: Memory limit (default: 1Gi)
- `REPLICAS`: Number of replicas (default: 1)
- `ROUTE_HOSTNAME`: Custom route hostname (optional)

## 🔧 S2I Build Process

### 1. Assemble Phase
- Installs Python dependencies from `requirements.txt`
- Creates application directory structure
- Copies application files
- Sets proper permissions
- Creates startup script

### 2. Run Phase
- Sets environment variables
- Changes to application directory
- Starts the FastAPI application

## 📊 Monitoring and Debugging

### Check Build Status
```bash
# List builds
oc get builds -n resource-governance

# View build logs
oc logs build/<build-name> -n resource-governance

# Watch build progress
oc logs -f buildconfig/oru-analyzer -n resource-governance
```

### Check Application Status
```bash
# Check pods
oc get pods -n resource-governance

# Check deployment
oc get deploymentconfig -n resource-governance

# View application logs
oc logs -f deploymentconfig/oru-analyzer -n resource-governance
```

### Check Routes
```bash
# List routes
oc get route -n resource-governance

# Get route URL
oc get route oru-analyzer -o jsonpath='{.spec.host}'
```

## 🔄 Automatic Rebuilds

S2I supports automatic rebuilds when:

1. **Code Changes**: Push to the Git repository
2. **Config Changes**: Update ConfigMap or environment variables
3. **Image Changes**: Update base Python image

### Trigger Rebuild
```bash
# Manual rebuild
oc start-build oru-analyzer

# Rebuild from specific Git reference
oc start-build oru-analyzer --from-repo=https://github.com/andersonid/openshift-resource-governance.git --from-commit=main
```

## 🆚 S2I vs Container Build Comparison

| Feature | S2I | Container Build |
|---------|-----|-----------------|
| **Deployment Speed** | ⚡ Fast | 🐌 Slower |
| **Build Control** | 🔒 Limited | 🎛️ Full Control |
| **Git Integration** | ✅ Native | ❌ Manual |
| **Auto Rebuilds** | ✅ Automatic | ❌ Manual |
| **Registry Dependency** | ❌ None | ✅ Required |
| **CI/CD Complexity** | 🟢 Simple | 🟡 Complex |
| **Debugging** | 🟡 Limited | 🟢 Full Access |
| **Custom Builds** | ❌ Limited | ✅ Full Support |

## 🚀 Quick Start

```bash
# 1. Clone repository
git clone https://github.com/andersonid/openshift-resource-governance.git
cd openshift-resource-governance

# 2. Login to OpenShift
oc login <cluster-url>

# 3. Deploy using S2I
./scripts/deploy-s2i.sh

# 4. Access application
# Get URL from output or run:
oc get route oru-analyzer -o jsonpath='{.spec.host}'
```

## 🐛 Troubleshooting

### Common Issues

1. **Build Fails**
   ```bash
   # Check build logs
   oc logs build/<build-name>

   # Check build configuration
   oc describe buildconfig oru-analyzer
   ```

2. **Application Won't Start**
   ```bash
   # Check pod logs
   oc logs deploymentconfig/oru-analyzer

   # Check pod status
   oc describe pod <pod-name>
   ```

3. **Route Not Accessible**
   ```bash
   # Check route configuration
   oc describe route oru-analyzer

   # Check service
   oc get svc oru-analyzer
   ```

### Debug Commands

```bash
# Get all resources
oc get all -n resource-governance

# Check events
oc get events -n resource-governance

# Check build configuration
oc describe buildconfig oru-analyzer

# Check deployment configuration
oc describe deploymentconfig oru-analyzer
```

## 📚 Additional Resources

- [OpenShift Source-to-Image Documentation](https://docs.openshift.com/container-platform/4.15/builds/build-strategies.html#builds-strategy-s2i_build-strategies)
- [Python S2I Builder](https://github.com/sclorg/s2i-python-container)
- [OpenShift Templates](https://docs.openshift.com/container-platform/4.15/openshift_images/using-templates.html)

## 🤝 Contributing

To contribute to the S2I configuration:

1. Modify `.s2i/environment` for environment variables
2. Update `.s2i/bin/assemble` for build process
3. Update `.s2i/bin/run` for runtime behavior
4. Modify `openshift-s2i.yaml` for OpenShift resources
5. Test with `./scripts/deploy-s2i.sh`

---

**Note**: S2I deployment is an alternative to the standard container build process. Both methods are supported and can be used interchangeably based on your requirements.
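The "Automatic Rebuilds" section above relies on OpenShift build triggers. One way to wire a Git push to a rebuild, assuming the BuildConfig ends up named `oru-analyzer` as in the template (a sketch, not part of this commit):

```bash
# Add a GitHub webhook trigger to the BuildConfig
oc set triggers bc/oru-analyzer --from-github

# Print the webhook details to register in the GitHub repository settings
oc describe bc/oru-analyzer | grep -A 2 "Webhook GitHub"
```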
README.md (63 lines changed)
@@ -31,6 +31,20 @@ A comprehensive tool for analyzing user workloads and resource usage in OpenShif

### 🚀 Quick Deploy (Recommended)

#### Option 1: Source-to-Image (S2I) - Fastest
```bash
# 1. Clone the repository
git clone https://github.com/andersonid/openshift-resource-governance.git
cd openshift-resource-governance

# 2. Login to OpenShift
oc login <cluster-url>

# 3. Deploy using S2I (automatic build from Git)
./scripts/deploy-s2i.sh
```

#### Option 2: Container Build (Traditional)
```bash
# 1. Clone the repository
git clone https://github.com/andersonid/openshift-resource-governance.git
@@ -294,13 +308,46 @@ python -m uvicorn app.main:app --reload --host 0.0.0.0 --port 8080

### Available Scripts
```bash
# Essential scripts (only 4 remaining after cleanup)
# Essential scripts (only 5 remaining after cleanup)
./setup.sh                      # Initial environment setup
./scripts/build-and-push.sh     # Build and push to Quay.io
./scripts/deploy-complete.sh    # Complete OpenShift deployment
./scripts/deploy-complete.sh    # Complete OpenShift deployment (Container Build)
./scripts/deploy-s2i.sh         # S2I deployment (Source-to-Image)
./scripts/undeploy-complete.sh  # Complete application removal
```

## 🚀 Source-to-Image (S2I) Support

ORU Analyzer now supports **Source-to-Image (S2I)** deployment as an alternative to container-based deployment.

### S2I Benefits
- ⚡ **Faster deployment** - Direct from Git repository
- 🔄 **Automatic rebuilds** - When code changes
- 🎯 **No external registry** - OpenShift manages everything
- 🔧 **Simpler CI/CD** - No GitHub Actions + Quay.io needed

### S2I vs Container Build

| Feature | S2I | Container Build |
|---------|-----|-----------------|
| **Deployment Speed** | ⚡ Fast | 🐌 Slower |
| **Auto Rebuilds** | ✅ Yes | ❌ No |
| **Git Integration** | ✅ Native | ❌ Manual |
| **Registry Dependency** | ❌ None | ✅ Quay.io |
| **Build Control** | 🔒 Limited | 🎛️ Full Control |

### S2I Quick Start
```bash
# Deploy using S2I
./scripts/deploy-s2i.sh

# Or use oc new-app
oc new-app python:3.11~https://github.com/andersonid/openshift-resource-governance.git \
  --name=oru-analyzer --env=PYTHON_VERSION=3.11
```

For detailed S2I documentation, see [README-S2I.md](README-S2I.md).

### Tests
```bash
# Test import
@@ -312,9 +359,17 @@ curl http://localhost:8080/health

## 🆕 Recent Updates

### **Latest Version (v2.0.0) - PatternFly UI Revolution**
### **Latest Version (v2.1.0) - S2I Support Added**

**🎨 Complete UI Overhaul:**
**🚀 Source-to-Image (S2I) Support:**
- ✅ **S2I Deployment**: Alternative deployment method using OpenShift Source-to-Image
- ✅ **Automatic Builds**: Direct deployment from Git repository with auto-rebuilds
- ✅ **Simplified CI/CD**: No external registry dependency (Quay.io optional)
- ✅ **Faster Deployment**: S2I deployment is significantly faster than container builds
- ✅ **Git Integration**: Native OpenShift integration with Git repositories
- ✅ **Complete S2I Stack**: Custom assemble/run scripts, OpenShift templates, and deployment automation

**🎨 Previous Version (v2.0.0) - PatternFly UI Revolution:**
- ✅ **PatternFly Design System**: Modern, enterprise-grade UI components
- ✅ **Smart Recommendations Gallery**: Individual workload cards with bulk selection
- ✅ **VPA CRD Integration**: Real Kubernetes API for Vertical Pod Autoscaler management
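Either deployment path described above can be verified the same way once a route exists. A quick check, assuming the route is named `oru-analyzer` in the `resource-governance` namespace as the docs use (names assumed):

```bash
# Resolve the route host and hit the health endpoint
HOST=$(oc get route oru-analyzer -n resource-governance -o jsonpath='{.spec.host}')
curl -k "https://${HOST}/health"
```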
File diff suppressed because it is too large
@@ -1,701 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>OpenShift Resource Governance Tool</title>
|
||||
|
||||
<!-- PatternFly 6.3.1 CSS -->
|
||||
<link rel="stylesheet" href="https://unpkg.com/@patternfly/patternfly@6.3.1/patternfly.css">
|
||||
<link rel="stylesheet" href="https://unpkg.com/@patternfly/patternfly@6.3.1/patternfly-addons.css">
|
||||
|
||||
<!-- PatternFly 6.3.1 Icons -->
|
||||
<link rel="stylesheet" href="https://unpkg.com/@patternfly/patternfly@6.3.1/patternfly-icons.css">
|
||||
|
||||
<!-- Custom styles -->
|
||||
<style>
|
||||
.pf-c-page__main {
|
||||
--pf-c-page__main--BackgroundColor: var(--pf-global--BackgroundColor--100);
|
||||
}
|
||||
|
||||
.workload-card {
|
||||
margin-bottom: var(--pf-global--spacer--md);
|
||||
}
|
||||
|
||||
.metric-card {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.metric-value {
|
||||
font-size: var(--pf-global--FontSize--2xl);
|
||||
font-weight: var(--pf-global--FontWeight--bold);
|
||||
color: var(--pf-global--primary-color--100);
|
||||
}
|
||||
|
||||
.metric-label {
|
||||
font-size: var(--pf-global--FontSize--sm);
|
||||
color: var(--pf-global--Color--200);
|
||||
}
|
||||
|
||||
.severity-critical {
|
||||
--pf-c-badge--m-read--BackgroundColor: var(--pf-global--danger-color--100);
|
||||
}
|
||||
|
||||
.severity-warning {
|
||||
--pf-c-badge--m-read--BackgroundColor: var(--pf-global--warning-color--100);
|
||||
}
|
||||
|
||||
.severity-error {
|
||||
--pf-c-badge--m-read--BackgroundColor: var(--pf-global--danger-color--200);
|
||||
}
|
||||
|
||||
.severity-info {
|
||||
--pf-c-badge--m-read--BackgroundColor: var(--pf-global--info-color--100);
|
||||
}
|
||||
|
||||
.loading-spinner {
|
||||
text-align: center;
|
||||
padding: var(--pf-global--spacer--xl);
|
||||
}
|
||||
|
||||
.error-message {
|
||||
color: var(--pf-global--danger-color--100);
|
||||
text-align: center;
|
||||
padding: var(--pf-global--spacer--lg);
|
||||
}
|
||||
|
||||
.breadcrumb-container {
|
||||
margin-bottom: var(--pf-global--spacer--md);
|
||||
}
|
||||
|
||||
.chart-container {
|
||||
height: 300px;
|
||||
margin-bottom: var(--pf-global--spacer--lg);
|
||||
}
|
||||
|
||||
.workload-details {
|
||||
margin-top: var(--pf-global--spacer--lg);
|
||||
}
|
||||
|
||||
.yaml-content {
|
||||
font-family: 'Courier New', monospace;
|
||||
font-size: var(--pf-global--FontSize--sm);
|
||||
background-color: var(--pf-global--BackgroundColor--200);
|
||||
padding: var(--pf-global--spacer--md);
|
||||
border-radius: var(--pf-global--BorderRadius--sm);
|
||||
white-space: pre-wrap;
|
||||
overflow-x: auto;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div id="app">
|
||||
<!-- Page Structure -->
|
||||
<div class="pf-c-page" id="page-layout-default-nav">
|
||||
<!-- Header -->
|
||||
<header class="pf-c-page__header">
|
||||
<div class="pf-c-page__header-brand">
|
||||
<div class="pf-c-page__header-brand-toggle">
|
||||
<button class="pf-c-button pf-m-plain" type="button" id="nav-toggle" aria-label="Global navigation" aria-expanded="true" aria-controls="primary-nav">
|
||||
<i class="fas fa-bars" aria-hidden="true"></i>
|
||||
</button>
|
||||
</div>
|
||||
<div class="pf-c-page__header-brand-link">
|
||||
<img class="pf-c-brand" src="https://www.patternfly.org/assets/images/logo__pf--reverse-on-md.svg" alt="PatternFly" />
|
||||
</div>
|
||||
</div>
|
||||
<div class="pf-c-page__header-tools">
|
||||
<div class="pf-c-page__header-tools-group">
|
||||
<div class="pf-c-page__header-tools-item">
|
||||
<button class="pf-c-button pf-m-plain" type="button" aria-label="Settings">
|
||||
<i class="fas fa-cog" aria-hidden="true"></i>
|
||||
</button>
|
||||
</div>
|
||||
<div class="pf-c-page__header-tools-item">
|
||||
<button class="pf-c-button pf-m-plain" type="button" aria-label="Help">
|
||||
<i class="fas fa-question-circle" aria-hidden="true"></i>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<!-- Sidebar -->
|
||||
<div class="pf-c-page__sidebar" id="primary-nav">
|
||||
<div class="pf-c-page__sidebar-body">
|
||||
<nav class="pf-c-nav" id="primary-nav" aria-label="Global">
|
||||
<ul class="pf-c-nav__list">
|
||||
<li class="pf-c-nav__item">
|
||||
<a href="#" class="pf-c-nav__link" data-section="workload-scanner">
|
||||
<i class="fas fa-search" aria-hidden="true"></i>
|
||||
Workload Scanner
|
||||
</a>
|
||||
</li>
|
||||
<li class="pf-c-nav__item">
|
||||
<a href="#" class="pf-c-nav__link" data-section="historical-analysis">
|
||||
<i class="fas fa-chart-line" aria-hidden="true"></i>
|
||||
Historical Analysis
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
</nav>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Main Content -->
|
||||
<main class="pf-c-page__main" tabindex="-1">
|
||||
<!-- Workload Scanner Section -->
|
||||
<section class="pf-c-page__main-section" id="workload-scanner-section" style="display: block;">
|
||||
<div class="pf-c-page__main-breadcrumb">
|
||||
<nav class="pf-c-breadcrumb" aria-label="breadcrumb">
|
||||
<ol class="pf-c-breadcrumb__list">
|
||||
<li class="pf-c-breadcrumb__item">
|
||||
<span class="pf-c-breadcrumb__item-divider">
|
||||
<i class="fas fa-angle-right" aria-hidden="true"></i>
|
||||
</span>
|
||||
<a href="#" class="pf-c-breadcrumb__link">Workload Scanner</a>
|
||||
</li>
|
||||
</ol>
|
||||
</nav>
|
||||
</div>
|
||||
|
||||
<div class="pf-c-page__main-section">
|
||||
<div class="pf-l-grid pf-m-gutter">
|
||||
<!-- Page Title -->
|
||||
<div class="pf-l-grid__item pf-m-12-col">
|
||||
<div class="pf-c-content">
|
||||
<h1>Workload Scanner</h1>
|
||||
<p>Identify and analyze workloads with resource configuration issues</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Summary Cards -->
|
||||
<div class="pf-l-grid__item pf-m-12-col">
|
||||
<div class="pf-l-grid pf-m-gutter" id="summary-cards">
|
||||
<!-- Cards will be populated by JavaScript -->
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Workloads Table -->
|
||||
<div class="pf-l-grid__item pf-m-12-col">
|
||||
<div class="pf-c-card">
|
||||
<div class="pf-c-card__header">
|
||||
<div class="pf-c-card__title">
|
||||
<h2>Workloads with Issues</h2>
|
||||
</div>
|
||||
<div class="pf-c-card__actions">
|
||||
<button class="pf-c-button pf-m-primary" id="refresh-workloads">
|
||||
<i class="fas fa-sync-alt" aria-hidden="true"></i>
|
||||
Refresh
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="pf-c-card__body">
|
||||
<div id="workloads-table-container">
|
||||
<div class="loading-spinner">
|
||||
<div class="pf-c-spinner" role="progressbar" aria-label="Loading workloads">
|
||||
<span class="pf-c-spinner__clipper"></span>
|
||||
<span class="pf-c-spinner__lead-ball"></span>
|
||||
<span class="pf-c-spinner__tail-ball"></span>
|
||||
</div>
|
||||
<div>Loading workloads...</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- Historical Analysis Section -->
|
||||
<section class="pf-c-page__main-section" id="historical-analysis-section" style="display: none;">
|
||||
<div class="pf-c-page__main-breadcrumb">
|
||||
<nav class="pf-c-breadcrumb" aria-label="breadcrumb">
|
||||
<ol class="pf-c-breadcrumb__list">
|
||||
<li class="pf-c-breadcrumb__item">
|
||||
<span class="pf-c-breadcrumb__item-divider">
|
||||
<i class="fas fa-angle-right" aria-hidden="true"></i>
|
||||
</span>
|
||||
<a href="#" class="pf-c-breadcrumb__link" data-section="workload-scanner">Workload Scanner</a>
|
||||
</li>
|
||||
<li class="pf-c-breadcrumb__item">
|
||||
<span class="pf-c-breadcrumb__item-divider">
|
||||
<i class="fas fa-angle-right" aria-hidden="true"></i>
|
||||
</span>
|
||||
<span class="pf-c-breadcrumb__item-text">Historical Analysis</span>
|
||||
</li>
|
||||
</ol>
|
||||
</nav>
|
||||
</div>
|
||||
|
||||
<div class="pf-c-page__main-section">
|
||||
<div class="pf-l-grid pf-m-gutter">
|
||||
<!-- Page Title -->
|
||||
<div class="pf-l-grid__item pf-m-12-col">
|
||||
<div class="pf-c-content">
|
||||
<h1>Historical Analysis</h1>
|
||||
<p>Resource consumption analysis and historical data</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Workloads List -->
|
||||
<div class="pf-l-grid__item pf-m-12-col">
|
||||
<div class="pf-c-card">
|
||||
<div class="pf-c-card__header">
|
||||
<div class="pf-c-card__title">
|
||||
<h2>Available Workloads</h2>
|
||||
</div>
|
||||
<div class="pf-c-card__actions">
|
||||
<button class="pf-c-button pf-m-primary" id="refresh-historical">
|
||||
<i class="fas fa-sync-alt" aria-hidden="true"></i>
|
||||
Refresh
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="pf-c-card__body">
|
||||
<div id="historical-workloads-container">
|
||||
<div class="loading-spinner">
|
||||
<div class="pf-c-spinner" role="progressbar" aria-label="Loading historical data">
|
||||
<span class="pf-c-spinner__clipper"></span>
|
||||
<span class="pf-c-spinner__lead-ball"></span>
|
||||
<span class="pf-c-spinner__tail-ball"></span>
|
||||
</div>
|
||||
<div>Loading historical data...</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Workload Details (hidden initially) -->
|
||||
<div class="pf-l-grid__item pf-m-12-col" id="workload-details-container" style="display: none;">
|
||||
<div class="pf-c-card">
|
||||
<div class="pf-c-card__header">
|
||||
<div class="pf-c-card__title">
|
||||
<h2 id="workload-details-title">Workload Details</h2>
|
||||
</div>
|
||||
<div class="pf-c-card__actions">
|
||||
<button class="pf-c-button pf-m-plain" id="close-workload-details">
|
||||
<i class="fas fa-times" aria-hidden="true"></i>
|
||||
Close
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="pf-c-card__body">
|
||||
<div id="workload-details-content">
|
||||
<!-- Workload details will be populated here -->
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</main>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- PatternFly 6.3.1 JavaScript -->
|
||||
<script src="https://unpkg.com/@patternfly/patternfly@6.3.1/patternfly.js"></script>
|
||||
|
||||
<!-- Font Awesome for icons -->
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
|
||||
|
||||
<!-- Custom JavaScript -->
|
||||
<script>
|
||||
// Global variables
|
||||
let currentData = null;
|
||||
let currentSection = 'workload-scanner';
|
||||
|
||||
// Initialize the application
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
initializeApp();
|
||||
});
|
||||
|
||||
function initializeApp() {
|
||||
// Setup navigation
|
||||
setupNavigation();
|
||||
|
||||
// Load initial data
|
||||
loadWorkloadScanner();
|
||||
}
|
||||
|
||||
function setupNavigation() {
|
||||
// Sidebar navigation
|
||||
const navLinks = document.querySelectorAll('.pf-c-nav__link[data-section]');
|
||||
navLinks.forEach(link => {
|
||||
link.addEventListener('click', function(e) {
|
||||
e.preventDefault();
|
||||
const section = this.getAttribute('data-section');
|
||||
showSection(section);
|
||||
});
|
||||
});
|
||||
|
||||
// Breadcrumb navigation
|
||||
const breadcrumbLinks = document.querySelectorAll('.pf-c-breadcrumb__link[data-section]');
|
||||
breadcrumbLinks.forEach(link => {
|
||||
link.addEventListener('click', function(e) {
|
||||
e.preventDefault();
|
||||
const section = this.getAttribute('data-section');
|
||||
showSection(section);
|
||||
});
|
||||
});
|
||||
|
||||
// Close workload details
|
||||
document.getElementById('close-workload-details').addEventListener('click', function() {
|
||||
document.getElementById('workload-details-container').style.display = 'none';
|
||||
});
|
||||
|
||||
// Refresh buttons
|
||||
document.getElementById('refresh-workloads').addEventListener('click', loadWorkloadScanner);
|
||||
document.getElementById('refresh-historical').addEventListener('click', loadHistoricalAnalysis);
|
||||
}
|
||||
|
||||
function showSection(section) {
|
||||
// Hide all sections
|
||||
document.querySelectorAll('.pf-c-page__main-section').forEach(sec => {
|
||||
sec.style.display = 'none';
|
||||
});
|
||||
|
||||
// Show selected section
|
||||
document.getElementById(section + '-section').style.display = 'block';
|
||||
|
||||
// Update active nav item
|
||||
document.querySelectorAll('.pf-c-nav__link').forEach(link => {
|
||||
link.classList.remove('pf-m-current');
|
||||
});
|
||||
document.querySelector(`.pf-c-nav__link[data-section="${section}"]`).classList.add('pf-m-current');
|
||||
|
||||
currentSection = section;
|
||||
|
||||
// Load section data
|
||||
if (section === 'workload-scanner') {
|
||||
loadWorkloadScanner();
|
||||
} else if (section === 'historical-analysis') {
|
||||
loadHistoricalAnalysis();
|
||||
}
|
||||
}
|
||||
|
||||
async function loadWorkloadScanner() {
|
||||
try {
|
||||
showLoading('workloads-table-container');
|
||||
|
||||
// Load cluster status
|
||||
const clusterResponse = await fetch('/api/v1/cluster/status');
|
||||
const clusterData = await clusterResponse.json();
|
||||
|
||||
// Load validations
|
||||
const validationsResponse = await fetch('/api/v1/validations');
|
||||
const validationsData = await validationsResponse.json();
|
||||
|
||||
currentData = { cluster: clusterData, validations: validationsData };
|
||||
|
||||
// Update summary cards
|
||||
updateSummaryCards(clusterData);
|
||||
|
||||
// Update workloads table
|
||||
updateWorkloadsTable(validationsData);
|
||||
|
||||
} catch (error) {
|
||||
console.error('Error loading workload scanner data:', error);
|
||||
showError('workloads-table-container', 'Failed to load workload data');
|
||||
}
|
||||
}
|
||||
|
||||
async function loadHistoricalAnalysis() {
|
||||
try {
|
||||
showLoading('historical-workloads-container');
|
||||
|
||||
// Load historical data
|
||||
const response = await fetch('/api/v1/historical-analysis');
|
||||
const data = await response.json();
|
||||
|
||||
updateHistoricalWorkloads(data);
|
||||
|
||||
} catch (error) {
|
||||
console.error('Error loading historical analysis data:', error);
|
||||
showError('historical-workloads-container', 'Failed to load historical data');
|
||||
}
|
||||
}
|
||||
|
||||
function updateSummaryCards(data) {
|
||||
const container = document.getElementById('summary-cards');
|
||||
|
||||
const cards = [
|
||||
{
|
||||
title: 'Total Workloads',
|
||||
value: data.total_pods || 0,
|
||||
icon: 'fas fa-cube',
|
||||
color: 'blue'
|
||||
},
|
||||
{
|
||||
title: 'Namespaces',
|
||||
value: data.total_namespaces || 0,
|
||||
icon: 'fas fa-layer-group',
|
||||
color: 'green'
|
||||
},
|
||||
{
|
||||
title: 'Critical Issues',
|
||||
value: data.critical_issues || 0,
|
||||
icon: 'fas fa-exclamation-triangle',
|
||||
color: 'red'
|
||||
},
|
||||
{
|
||||
title: 'Warnings',
|
||||
value: data.total_warnings || 0,
|
||||
icon: 'fas fa-exclamation-circle',
|
||||
color: 'orange'
|
||||
}
|
||||
];
|
||||
|
||||
container.innerHTML = cards.map(card => `
|
||||
<div class="pf-l-grid__item pf-m-3-col">
|
||||
<div class="pf-c-card metric-card">
|
||||
<div class="pf-c-card__body">
|
||||
<div class="metric-value">${card.value}</div>
|
||||
<div class="metric-label">
|
||||
<i class="${card.icon}" aria-hidden="true"></i>
|
||||
${card.title}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
`).join('');
|
||||
}
|
||||
|
||||
function updateWorkloadsTable(data) {
|
||||
const container = document.getElementById('workloads-table-container');
|
||||
|
||||
if (!data.namespaces || data.namespaces.length === 0) {
|
||||
container.innerHTML = '<div class="error-message">No workload data available</div>';
|
||||
return;
|
||||
}
|
||||
|
||||
const tableHTML = `
|
||||
<div class="pf-c-table">
|
||||
<table class="pf-c-table__table" role="grid" aria-label="Workloads table">
|
||||
<thead>
|
||||
<tr class="pf-c-table__row">
|
||||
<th class="pf-c-table__th">Namespace</th>
|
||||
<th class="pf-c-table__th">Pods</th>
|
||||
<th class="pf-c-table__th">Issues</th>
|
||||
<th class="pf-c-table__th">Severity</th>
|
||||
<th class="pf-c-table__th">Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
${data.namespaces.map(namespace => `
|
||||
<tr class="pf-c-table__row">
|
||||
<td class="pf-c-table__td">
|
||||
<strong>${namespace.namespace}</strong>
|
||||
</td>
|
||||
<td class="pf-c-table__td">${Object.keys(namespace.pods || {}).length}</td>
|
||||
<td class="pf-c-table__td">${namespace.total_validations || 0}</td>
|
||||
<td class="pf-c-table__td">
|
||||
<span class="pf-c-badge severity-${getHighestSeverity(namespace)}">
|
||||
${getHighestSeverity(namespace)}
|
||||
</span>
|
||||
</td>
|
||||
<td class="pf-c-table__td">
|
||||
<div class="pf-c-button-group">
|
||||
<button class="pf-c-button pf-m-primary pf-m-small" onclick="analyzeWorkload('${namespace.namespace}')">
|
||||
Analyze
|
||||
</button>
|
||||
<button class="pf-c-button pf-m-secondary pf-m-small" onclick="fixWorkload('${namespace.namespace}')">
|
||||
Fix
|
||||
</button>
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
`).join('')}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
`;
|
||||
|
||||
container.innerHTML = tableHTML;
|
||||
}
|
||||
|
||||
function updateHistoricalWorkloads(data) {
|
||||
const container = document.getElementById('historical-workloads-container');
|
||||
|
||||
if (!data.workloads || data.workloads.length === 0) {
|
||||
container.innerHTML = '<div class="error-message">No historical data available</div>';
|
||||
return;
|
||||
}
|
||||
|
||||
const tableHTML = `
|
||||
<div class="pf-c-table">
|
||||
<table class="pf-c-table__table" role="grid" aria-label="Historical workloads table">
|
||||
<thead>
|
||||
<tr class="pf-c-table__row">
|
||||
<th class="pf-c-table__th">Workload</th>
|
||||
<th class="pf-c-table__th">Namespace</th>
|
||||
<th class="pf-c-table__th">CPU Usage</th>
|
||||
<th class="pf-c-table__th">Memory Usage</th>
|
||||
<th class="pf-c-table__th">Last Updated</th>
|
||||
<th class="pf-c-table__th">Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
${data.workloads.map(workload => `
|
||||
<tr class="pf-c-table__row">
|
||||
<td class="pf-c-table__td">
|
||||
<strong>${workload.name}</strong>
|
||||
</td>
|
||||
<td class="pf-c-table__td">${workload.namespace}</td>
|
||||
<td class="pf-c-table__td">${workload.cpu_usage || 'N/A'}</td>
|
||||
<td class="pf-c-table__td">${workload.memory_usage || 'N/A'}</td>
|
||||
<td class="pf-c-table__td">${workload.last_updated || 'N/A'}</td>
|
||||
<td class="pf-c-table__td">
|
||||
<button class="pf-c-button pf-m-primary pf-m-small" onclick="showWorkloadDetails('${workload.name}', '${workload.namespace}')">
|
||||
View Details
|
||||
</button>
|
||||
</td>
|
||||
</tr>
|
||||
`).join('')}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
`;
|
||||
|
||||
container.innerHTML = tableHTML;
|
||||
}
|
||||
|
||||
function showWorkloadDetails(workloadName, namespace) {
|
||||
// Update breadcrumb
|
||||
const breadcrumb = document.querySelector('#historical-analysis-section .pf-c-breadcrumb__list');
|
||||
breadcrumb.innerHTML = `
|
||||
<li class="pf-c-breadcrumb__item">
|
||||
<span class="pf-c-breadcrumb__item-divider">
|
||||
<i class="fas fa-angle-right" aria-hidden="true"></i>
|
||||
</span>
|
||||
<a href="#" class="pf-c-breadcrumb__link" data-section="workload-scanner">Workload Scanner</a>
|
||||
</li>
|
||||
<li class="pf-c-breadcrumb__item">
|
||||
<span class="pf-c-breadcrumb__item-divider">
|
||||
<i class="fas fa-angle-right" aria-hidden="true"></i>
|
||||
</span>
|
||||
<a href="#" class="pf-c-breadcrumb__link" data-section="historical-analysis">Historical Analysis</a>
|
||||
</li>
|
||||
<li class="pf-c-breadcrumb__item">
|
||||
<span class="pf-c-breadcrumb__item-divider">
|
||||
<i class="fas fa-angle-right" aria-hidden="true"></i>
|
||||
</span>
|
||||
<span class="pf-c-breadcrumb__item-text">${workloadName}</span>
|
||||
</li>
|
||||
`;
|
||||
|
||||
// Update title
|
||||
document.getElementById('workload-details-title').textContent = `${workloadName} - ${namespace}`;
|
||||
|
||||
// Load workload details
|
||||
loadWorkloadDetails(workloadName, namespace);
|
||||
|
||||
// Show details container
|
||||
document.getElementById('workload-details-container').style.display = 'block';
|
||||
}
|
||||
|
||||
async function loadWorkloadDetails(workloadName, namespace) {
|
||||
try {
|
||||
const response = await fetch(`/api/v1/historical-analysis/${namespace}/${workloadName}`);
|
||||
const data = await response.json();
|
||||
|
||||
updateWorkloadDetails(data);
|
||||
|
||||
} catch (error) {
|
||||
console.error('Error loading workload details:', error);
|
||||
document.getElementById('workload-details-content').innerHTML =
|
||||
'<div class="error-message">Failed to load workload details</div>';
|
||||
}
|
||||
}
|
||||
|
||||
function updateWorkloadDetails(data) {
|
||||
const container = document.getElementById('workload-details-content');
|
||||
|
||||
container.innerHTML = `
|
||||
<div class="pf-l-grid pf-m-gutter">
|
||||
<div class="pf-l-grid__item pf-m-6-col">
|
||||
<div class="pf-c-card">
|
||||
<div class="pf-c-card__header">
|
||||
<div class="pf-c-card__title">
|
||||
<h3>CPU Usage</h3>
|
||||
</div>
|
||||
</div>
|
||||
<div class="pf-c-card__body">
|
||||
<div class="chart-container" id="cpu-chart">
|
||||
<!-- CPU chart will be rendered here -->
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="pf-l-grid__item pf-m-6-col">
|
||||
<div class="pf-c-card">
|
||||
<div class="pf-c-card__header">
|
||||
<div class="pf-c-card__title">
|
||||
<h3>Memory Usage</h3>
|
||||
</div>
|
||||
</div>
|
||||
<div class="pf-c-card__body">
|
||||
<div class="chart-container" id="memory-chart">
|
||||
<!-- Memory chart will be rendered here -->
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="pf-l-grid__item pf-m-12-col">
|
||||
<div class="pf-c-card">
|
||||
<div class="pf-c-card__header">
|
||||
<div class="pf-c-card__title">
|
||||
<h3>Resource Recommendations</h3>
|
||||
</div>
|
||||
</div>
|
||||
<div class="pf-c-card__body">
|
||||
<div class="yaml-content">${data.recommendations || 'No recommendations available'}</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
`;
|
||||
}
|
||||
|
||||
function analyzeWorkload(namespace) {
|
||||
console.log('Analyzing workload:', namespace);
|
||||
// TODO: Implement workload analysis
|
||||
}
|
||||
|
||||
function fixWorkload(namespace) {
|
||||
console.log('Fixing workload:', namespace);
|
||||
// TODO: Implement workload fixing
|
||||
}
|
||||
|
||||
function getHighestSeverity(namespace) {
|
||||
const breakdown = namespace.severity_breakdown || {};
|
||||
if (breakdown.error > 0) return 'error';
|
||||
if (breakdown.warning > 0) return 'warning';
|
||||
if (breakdown.info > 0) return 'info';
|
||||
return 'info';
|
||||
}
|
||||
|
||||
function showLoading(containerId) {
|
||||
const container = document.getElementById(containerId);
|
||||
container.innerHTML = `
|
||||
<div class="loading-spinner">
|
||||
<div class="pf-c-spinner" role="progressbar" aria-label="Loading">
|
||||
<span class="pf-c-spinner__clipper"></span>
|
||||
<span class="pf-c-spinner__lead-ball"></span>
|
||||
<span class="pf-c-spinner__tail-ball"></span>
|
||||
</div>
|
||||
<div>Loading...</div>
|
||||
</div>
|
||||
`;
|
||||
}
|
||||
|
||||
function showError(containerId, message) {
|
||||
const container = document.getElementById(containerId);
|
||||
container.innerHTML = `<div class="error-message">${message}</div>`;
|
||||
}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,99 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: resource-governance
|
||||
namespace: resource-governance
|
||||
labels:
|
||||
app.kubernetes.io/name: resource-governance
|
||||
app.kubernetes.io/component: governance
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: resource-governance
|
||||
app.kubernetes.io/component: governance
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: resource-governance
|
||||
app.kubernetes.io/component: governance
|
||||
spec:
|
||||
serviceAccountName: resource-governance-sa
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000940000
|
||||
fsGroup: 1000940000
|
||||
containers:
|
||||
- name: resource-governance
|
||||
image: python:3.11-slim
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: http
|
||||
protocol: TCP
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
command: ['sh', '-c']
|
||||
args:
|
||||
- |
|
||||
apt-get update && apt-get install -y git curl
|
||||
git clone https://github.com/andersonid/openshift-resource-governance.git /tmp/app
|
||||
cd /tmp/app
|
||||
pip install --no-cache-dir -r requirements.txt
|
||||
python -m uvicorn app.main:app --host 0.0.0.0 --port 8080
|
||||
env:
|
||||
- name: KUBECONFIG
|
||||
value: "/var/run/secrets/kubernetes.io/serviceaccount/token"
|
||||
- name: CPU_LIMIT_RATIO
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: resource-governance-config
|
||||
key: CPU_LIMIT_RATIO
|
||||
- name: MEMORY_LIMIT_RATIO
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: resource-governance-config
|
||||
key: MEMORY_LIMIT_RATIO
|
||||
- name: PROMETHEUS_URL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: resource-governance-config
|
||||
key: PROMETHEUS_URL
|
||||
- name: VPA_NAMESPACES
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: resource-governance-config
|
||||
key: VPA_NAMESPACES
|
||||
- name: LOG_LEVEL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: resource-governance-config
|
||||
key: LOG_LEVEL
|
||||
resources:
|
||||
requests:
|
||||
memory: "128Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "500m"
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 5
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
tolerations:
|
||||
- operator: Exists
|
||||
effect: NoSchedule
|
||||
@@ -1,121 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: resource-governance
|
||||
namespace: resource-governance
|
||||
labels:
|
||||
app.kubernetes.io/name: resource-governance
|
||||
app.kubernetes.io/component: governance
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: resource-governance
|
||||
app.kubernetes.io/component: governance
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: resource-governance
|
||||
app.kubernetes.io/component: governance
|
||||
spec:
|
||||
serviceAccountName: resource-governance-sa
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000940000
|
||||
fsGroup: 1000940000
|
||||
initContainers:
|
||||
- name: download-app
|
||||
image: alpine/git:latest
|
||||
command: ['sh', '-c']
|
||||
args:
|
||||
- |
|
||||
git clone https://github.com/andersonid/openshift-resource-governance.git /tmp/app
|
||||
cp -r /tmp/app/app /shared/
|
||||
cp /tmp/app/requirements.txt /shared/
|
||||
volumeMounts:
|
||||
- name: app-code
|
||||
mountPath: /shared
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
containers:
|
||||
- name: resource-governance
|
||||
image: python:3.11-slim
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: http
|
||||
protocol: TCP
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
command: ['sh', '-c']
|
||||
args:
|
||||
- |
|
||||
pip install --no-cache-dir -r /app/requirements.txt
|
||||
python -m uvicorn app.main:app --host 0.0.0.0 --port 8080
|
||||
volumeMounts:
|
||||
- name: app-code
|
||||
mountPath: /app
|
||||
env:
|
||||
- name: KUBECONFIG
|
||||
value: "/var/run/secrets/kubernetes.io/serviceaccount/token"
|
||||
- name: CPU_LIMIT_RATIO
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: resource-governance-config
|
||||
key: CPU_LIMIT_RATIO
|
||||
- name: MEMORY_LIMIT_RATIO
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: resource-governance-config
|
||||
key: MEMORY_LIMIT_RATIO
|
||||
- name: PROMETHEUS_URL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: resource-governance-config
|
||||
key: PROMETHEUS_URL
|
||||
- name: VPA_NAMESPACES
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: resource-governance-config
|
||||
key: VPA_NAMESPACES
|
||||
- name: LOG_LEVEL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: resource-governance-config
|
||||
key: LOG_LEVEL
|
||||
resources:
|
||||
requests:
|
||||
memory: "128Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "500m"
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
volumes:
|
||||
- name: app-code
|
||||
emptyDir: {}
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
tolerations:
|
||||
- operator: Exists
|
||||
effect: NoSchedule
|
||||
@@ -1,151 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: resource-governance
  namespace: resource-governance
  labels:
    app.kubernetes.io/name: resource-governance
    app.kubernetes.io/component: governance
spec:
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: resource-governance
      app.kubernetes.io/component: governance
  template:
    metadata:
      labels:
        app.kubernetes.io/name: resource-governance
        app.kubernetes.io/component: governance
    spec:
      serviceAccountName: resource-governance-sa
      imagePullSecrets:
      - name: docker-hub-secret
      securityContext:
        runAsNonRoot: true
        runAsUser: 1000940000
        fsGroup: 1000940000
      containers:
      - name: resource-governance
        image: andersonid/openshift-resource-governance:latest
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          name: http
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /api/v1/health
            port: 8080
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
          failureThreshold: 3
        readinessProbe:
          httpGet:
            path: /api/v1/health
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 5
          timeoutSeconds: 3
          failureThreshold: 3
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          seccompProfile:
            type: RuntimeDefault
        env:
        - name: KUBECONFIG
          value: "/var/run/secrets/kubernetes.io/serviceaccount/token"
        - name: CPU_LIMIT_RATIO
          valueFrom:
            configMapKeyRef:
              name: resource-governance-config
              key: CPU_LIMIT_RATIO
        - name: MEMORY_LIMIT_RATIO
          valueFrom:
            configMapKeyRef:
              name: resource-governance-config
              key: MEMORY_LIMIT_RATIO
        - name: MIN_CPU_REQUEST
          valueFrom:
            configMapKeyRef:
              name: resource-governance-config
              key: MIN_CPU_REQUEST
        - name: MIN_MEMORY_REQUEST
          valueFrom:
            configMapKeyRef:
              name: resource-governance-config
              key: MIN_MEMORY_REQUEST
        - name: CRITICAL_NAMESPACES
          valueFrom:
            configMapKeyRef:
              name: resource-governance-config
              key: CRITICAL_NAMESPACES
        - name: PROMETHEUS_URL
          valueFrom:
            configMapKeyRef:
              name: resource-governance-config
              key: PROMETHEUS_URL
        - name: REPORT_EXPORT_PATH
          valueFrom:
            configMapKeyRef:
              name: resource-governance-config
              key: REPORT_EXPORT_PATH
        - name: ENABLE_RBAC
          valueFrom:
            configMapKeyRef:
              name: resource-governance-config
              key: ENABLE_RBAC
        - name: SERVICE_ACCOUNT_NAME
          valueFrom:
            configMapKeyRef:
              name: resource-governance-config
              key: SERVICE_ACCOUNT_NAME
        resources:
          requests:
            cpu: 100m
            memory: 128Mi
          limits:
            cpu: 500m
            memory: 512Mi
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
          failureThreshold: 3
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 5
          timeoutSeconds: 3
          failureThreshold: 3
        volumeMounts:
        - name: reports-volume
          mountPath: /tmp/reports
        - name: tmp-volume
          mountPath: /tmp
      volumes:
      - name: reports-volume
        emptyDir: {}
      - name: tmp-volume
        emptyDir: {}
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
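With the standalone DaemonSet manifest removed, the workload is expected to run as a Deployment instead. As a quick sanity check on a cluster, here is a minimal sketch using standard oc commands, assuming the resource-governance namespace and labels from the manifests above:

    # After rolling out this commit, no DaemonSet should remain and the
    # app pods should be owned by a Deployment/ReplicaSet instead.
    oc get daemonset,deployment -n resource-governance
    oc get pods -n resource-governance -l app.kubernetes.io/name=resource-governance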
@@ -5,7 +5,7 @@ resources:
 - namespace.yaml
 - rbac.yaml
 - configmap.yaml
-- daemonset.yaml
+- deployment.yaml
 - service.yaml
 - route.yaml
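Since kustomization.yaml now lists deployment.yaml in place of daemonset.yaml, the whole manifest set can still be applied in one step; a minimal sketch, assuming the kustomization lives in the k8s/ directory referenced by the undeploy script further down:

    # Preview the rendered manifests, then apply them
    oc kustomize k8s/
    oc apply -k k8s/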
@@ -1,294 +0,0 @@
apiVersion: v1
kind: Template
metadata:
  name: resource-governance-git-deploy
  annotations:
    description: "Deploy OpenShift Resource Governance Tool from GitHub repository"
    tags: "governance,resources,openshift,github"
parameters:
- name: GITHUB_REPO
  displayName: "GitHub Repository URL"
  description: "GitHub repository URL"
  value: "https://github.com/andersonid/openshift-resource-governance.git"

- name: IMAGE_TAG
  displayName: "Image Tag"
  description: "Docker image tag"
  value: "latest"

- name: REGISTRY
  displayName: "Container Registry"
  description: "Docker image registry"
  value: "andersonid"

- name: NAMESPACE
  displayName: "Namespace"
  description: "Namespace to deploy into"
  value: "resource-governance"

objects:
- apiVersion: v1
  kind: Namespace
  metadata:
    name: ${NAMESPACE}
    labels:
      name: ${NAMESPACE}
      app.kubernetes.io/name: resource-governance
      app.kubernetes.io/component: governance
      app.kubernetes.io/part-of: openshift-governance

- apiVersion: v1
  kind: ResourceQuota
  metadata:
    name: resource-governance-quota
    namespace: ${NAMESPACE}
  spec:
    hard:
      requests.cpu: "2"
      requests.memory: 4Gi
      limits.cpu: "4"
      limits.memory: 8Gi
      pods: "10"

- apiVersion: v1
  kind: LimitRange
  metadata:
    name: resource-governance-limits
    namespace: ${NAMESPACE}
  spec:
    limits:
    - default:
        cpu: "500m"
        memory: "512Mi"
      defaultRequest:
        cpu: "100m"
        memory: "128Mi"
      type: Container

- apiVersion: v1
  kind: ServiceAccount
  metadata:
    name: resource-governance-sa
    namespace: ${NAMESPACE}
    labels:
      app.kubernetes.io/name: resource-governance
      app.kubernetes.io/component: governance

- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    name: resource-governance-role
    labels:
      app.kubernetes.io/name: resource-governance
      app.kubernetes.io/component: governance
  rules:
  - apiGroups: [""]
    resources: ["pods", "namespaces", "nodes", "events"]
    verbs: ["get", "list", "watch", "patch", "update", "create"]
  - apiGroups: ["autoscaling.k8s.io"]
    resources: ["verticalpodautoscalers"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["deployments", "replicasets"]
    verbs: ["get", "list", "watch", "patch", "update"]

- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRoleBinding
  metadata:
    name: resource-governance-binding
    labels:
      app.kubernetes.io/name: resource-governance
      app.kubernetes.io/component: governance
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: resource-governance-role
  subjects:
  - kind: ServiceAccount
    name: resource-governance-sa
    namespace: ${NAMESPACE}

- apiVersion: v1
  kind: ConfigMap
  metadata:
    name: resource-governance-config
    namespace: ${NAMESPACE}
    labels:
      app.kubernetes.io/name: resource-governance
      app.kubernetes.io/component: governance
  data:
    CPU_LIMIT_RATIO: "3.0"
    MEMORY_LIMIT_RATIO: "3.0"
    MIN_CPU_REQUEST: "10m"
    MIN_MEMORY_REQUEST: "32Mi"
    CRITICAL_NAMESPACES: |
      openshift-monitoring
      openshift-ingress
      openshift-apiserver
      openshift-controller-manager
      openshift-sdn
    PROMETHEUS_URL: "http://prometheus.openshift-monitoring.svc.cluster.local:9090"
    REPORT_EXPORT_PATH: "/tmp/reports"
    ENABLE_RBAC: "true"
    SERVICE_ACCOUNT_NAME: "resource-governance-sa"
    GITHUB_REPO: "${GITHUB_REPO}"

- apiVersion: apps/v1
  kind: DaemonSet
  metadata:
    name: resource-governance
    namespace: ${NAMESPACE}
    labels:
      app.kubernetes.io/name: resource-governance
      app.kubernetes.io/component: governance
  spec:
    selector:
      matchLabels:
        app.kubernetes.io/name: resource-governance
        app.kubernetes.io/component: governance
    template:
      metadata:
        labels:
          app.kubernetes.io/name: resource-governance
          app.kubernetes.io/component: governance
        annotations:
          github.com/repo: "${GITHUB_REPO}"
      spec:
        serviceAccountName: resource-governance-sa
        securityContext:
          runAsNonRoot: true
          runAsUser: 1000
          fsGroup: 1000
        containers:
        - name: resource-governance
          image: ${REGISTRY}/resource-governance:${IMAGE_TAG}
          imagePullPolicy: Always
          ports:
          - containerPort: 8080
            name: http
            protocol: TCP
          env:
          - name: KUBECONFIG
            value: "/var/run/secrets/kubernetes.io/serviceaccount/token"
          - name: CPU_LIMIT_RATIO
            valueFrom:
              configMapKeyRef:
                name: resource-governance-config
                key: CPU_LIMIT_RATIO
          - name: MEMORY_LIMIT_RATIO
            valueFrom:
              configMapKeyRef:
                name: resource-governance-config
                key: MEMORY_LIMIT_RATIO
          - name: MIN_CPU_REQUEST
            valueFrom:
              configMapKeyRef:
                name: resource-governance-config
                key: MIN_CPU_REQUEST
          - name: MIN_MEMORY_REQUEST
            valueFrom:
              configMapKeyRef:
                name: resource-governance-config
                key: MIN_MEMORY_REQUEST
          - name: CRITICAL_NAMESPACES
            valueFrom:
              configMapKeyRef:
                name: resource-governance-config
                key: CRITICAL_NAMESPACES
          - name: PROMETHEUS_URL
            valueFrom:
              configMapKeyRef:
                name: resource-governance-config
                key: PROMETHEUS_URL
          - name: REPORT_EXPORT_PATH
            valueFrom:
              configMapKeyRef:
                name: resource-governance-config
                key: REPORT_EXPORT_PATH
          - name: ENABLE_RBAC
            valueFrom:
              configMapKeyRef:
                name: resource-governance-config
                key: ENABLE_RBAC
          - name: SERVICE_ACCOUNT_NAME
            valueFrom:
              configMapKeyRef:
                name: resource-governance-config
                key: SERVICE_ACCOUNT_NAME
          - name: GITHUB_REPO
            valueFrom:
              configMapKeyRef:
                name: resource-governance-config
                key: GITHUB_REPO
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 500m
              memory: 512Mi
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 5
            timeoutSeconds: 3
            failureThreshold: 3
          volumeMounts:
          - name: reports-volume
            mountPath: /tmp/reports
          - name: tmp-volume
            mountPath: /tmp
        volumes:
        - name: reports-volume
          emptyDir: {}
        - name: tmp-volume
          emptyDir: {}
        nodeSelector:
          kubernetes.io/os: linux
        tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule

- apiVersion: v1
  kind: Service
  metadata:
    name: resource-governance-service
    namespace: ${NAMESPACE}
    labels:
      app.kubernetes.io/name: resource-governance
      app.kubernetes.io/component: governance
  spec:
    type: ClusterIP
    ports:
    - port: 8080
      targetPort: 8080
      protocol: TCP
      name: http
    selector:
      app.kubernetes.io/name: resource-governance
      app.kubernetes.io/component: governance

- apiVersion: route.openshift.io/v1
  kind: Route
  metadata:
    name: resource-governance-route
    namespace: ${NAMESPACE}
    labels:
      app.kubernetes.io/name: resource-governance
      app.kubernetes.io/component: governance
    annotations:
      haproxy.router.openshift.io/timeout: "300s"
      haproxy.router.openshift.io/rate-limit: "100"
  spec:
    host: resource-governance.apps.openshift.local
    to:
      kind: Service
      name: resource-governance-service
      weight: 100
    port:
      targetPort: http
    tls:
      termination: edge
      insecureEdgeTerminationPolicy: Redirect
    wildcardPolicy: None
320
openshift-s2i.yaml
Normal file
320
openshift-s2i.yaml
Normal file
@@ -0,0 +1,320 @@
# OpenShift S2I Template for ORU Analyzer
# Source-to-Image deployment configuration

apiVersion: template.openshift.io/v1
kind: Template
metadata:
  name: oru-analyzer-s2i
  annotations:
    description: "ORU Analyzer - OpenShift Resource Usage Analyzer (S2I)"
    tags: "python,fastapi,openshift,resource-governance,monitoring"
    iconClass: "icon-python"
    openshift.io/display-name: "ORU Analyzer (S2I)"
    openshift.io/long-description: "OpenShift Resource Usage Analyzer using Source-to-Image"
    openshift.io/provider-display-name: "Red Hat"
    openshift.io/documentation-url: "https://github.com/andersonid/openshift-resource-governance"
    openshift.io/support-url: "https://github.com/andersonid/openshift-resource-governance/issues"

parameters:
- name: NAME
  displayName: "Application Name"
  description: "The name assigned to all of the frontend objects defined in this template."
  value: "oru-analyzer"
  required: true

- name: NAMESPACE
  displayName: "Namespace"
  description: "The OpenShift Namespace where the ImageStream resides."
  value: "resource-governance"
  required: true

- name: GIT_REPOSITORY
  displayName: "Git Repository URL"
  description: "The URL of the repository with your application source code."
  value: "https://github.com/andersonid/openshift-resource-governance.git"
  required: true

- name: GIT_REF
  displayName: "Git Reference"
  description: "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
  value: "main"
  required: true

- name: PYTHON_VERSION
  displayName: "Python Version"
  description: "Version of Python to use."
  value: "3.11"
  required: true

- name: CPU_REQUEST
  displayName: "CPU Request"
  description: "The amount of CPU to request for the container."
  value: "100m"
  required: true

- name: CPU_LIMIT
  displayName: "CPU Limit"
  description: "The amount of CPU to limit the container to."
  value: "500m"
  required: true

- name: MEMORY_REQUEST
  displayName: "Memory Request"
  description: "The amount of memory to request for the container."
  value: "256Mi"
  required: true

- name: MEMORY_LIMIT
  displayName: "Memory Limit"
  description: "The amount of memory to limit the container to."
  value: "1Gi"
  required: true

- name: REPLICAS
  displayName: "Number of Replicas"
  description: "Number of replicas to run."
  value: "1"
  required: true

- name: ROUTE_HOSTNAME
  displayName: "Route Hostname"
  description: "The hostname for the route. Leave blank for auto-generated hostname."
  value: ""
  required: false

objects:
# ImageStream for the application
- apiVersion: image.openshift.io/v1
  kind: ImageStream
  metadata:
    name: ${NAME}
    namespace: ${NAMESPACE}
    labels:
      app: ${NAME}
      component: backend
  spec:
    lookupPolicy:
      local: false

# BuildConfig for S2I
- apiVersion: build.openshift.io/v1
  kind: BuildConfig
  metadata:
    name: ${NAME}
    namespace: ${NAMESPACE}
    labels:
      app: ${NAME}
      component: backend
  spec:
    source:
      type: Git
      git:
        uri: ${GIT_REPOSITORY}
        ref: ${GIT_REF}
      contextDir: /
    strategy:
      type: Source
      sourceStrategy:
        from:
          kind: ImageStreamTag
          namespace: openshift
          name: python:${PYTHON_VERSION}
        env:
        - name: PYTHON_VERSION
          value: ${PYTHON_VERSION}
        - name: PIP_INDEX_URL
          value: "https://pypi.org/simple"
    output:
      to:
        kind: ImageStreamTag
        name: ${NAME}:latest
    triggers:
    - type: ConfigChange
    - type: ImageChange
      imageChange: {}

# Service
- apiVersion: v1
  kind: Service
  metadata:
    name: ${NAME}
    namespace: ${NAMESPACE}
    labels:
      app: ${NAME}
      component: backend
  spec:
    ports:
    - name: http
      port: 8080
      targetPort: 8080
      protocol: TCP
    selector:
      app: ${NAME}
      component: backend

# DeploymentConfig
- apiVersion: apps.openshift.io/v1
  kind: DeploymentConfig
  metadata:
    name: ${NAME}
    namespace: ${NAMESPACE}
    labels:
      app: ${NAME}
      component: backend
  spec:
    replicas: ${REPLICAS}
    selector:
      app: ${NAME}
      component: backend
    template:
      metadata:
        labels:
          app: ${NAME}
          component: backend
      spec:
        containers:
        - name: ${NAME}
          image: ${NAME}:latest
          ports:
          - containerPort: 8080
            protocol: TCP
          env:
          - name: PYTHON_VERSION
            value: ${PYTHON_VERSION}
          - name: HOST
            value: "0.0.0.0"
          - name: PORT
            value: "8080"
          - name: WORKERS
            value: "1"
          resources:
            requests:
              cpu: ${CPU_REQUEST}
              memory: ${MEMORY_REQUEST}
            limits:
              cpu: ${CPU_LIMIT}
              memory: ${MEMORY_LIMIT}
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 30
            timeoutSeconds: 10
            periodSeconds: 30
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 10
            timeoutSeconds: 5
            periodSeconds: 10
            failureThreshold: 3
    triggers:
    - type: ConfigChange
    - type: ImageChange
      imageChangeParams:
        automatic: true
        containerNames:
        - ${NAME}
        from:
          kind: ImageStreamTag
          name: ${NAME}:latest

# Route
- apiVersion: route.openshift.io/v1
  kind: Route
  metadata:
    name: ${NAME}
    namespace: ${NAMESPACE}
    labels:
      app: ${NAME}
      component: backend
  spec:
    host: ${ROUTE_HOSTNAME}
    to:
      kind: Service
      name: ${NAME}
      weight: 100
    port:
      targetPort: 8080
    tls:
      termination: edge
      insecureEdgeTerminationPolicy: Redirect

# ServiceAccount
- apiVersion: v1
  kind: ServiceAccount
  metadata:
    name: ${NAME}-sa
    namespace: ${NAMESPACE}
    labels:
      app: ${NAME}
      component: backend

# Role
- apiVersion: rbac.authorization.k8s.io/v1
  kind: Role
  metadata:
    name: ${NAME}-role
    namespace: ${NAMESPACE}
    labels:
      app: ${NAME}
      component: backend
  rules:
  - apiGroups: [""]
    resources: ["pods", "namespaces", "nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["deployments", "replicasets"]
    verbs: ["get", "list", "watch", "patch", "update"]
  - apiGroups: ["autoscaling.k8s.io"]
    resources: ["verticalpodautoscalers"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get", "list", "watch"]

# RoleBinding
- apiVersion: rbac.authorization.k8s.io/v1
  kind: RoleBinding
  metadata:
    name: ${NAME}-rolebinding
    namespace: ${NAMESPACE}
    labels:
      app: ${NAME}
      component: backend
  subjects:
  - kind: ServiceAccount
    name: ${NAME}-sa
    namespace: ${NAMESPACE}
  roleRef:
    kind: Role
    name: ${NAME}-role
    apiGroup: rbac.authorization.k8s.io

# ConfigMap
- apiVersion: v1
  kind: ConfigMap
  metadata:
    name: ${NAME}-config
    namespace: ${NAMESPACE}
    labels:
      app: ${NAME}
      component: backend
  data:
    CPU_LIMIT_RATIO: "3.0"
    MEMORY_LIMIT_RATIO: "3.0"
    MIN_CPU_REQUEST: "10m"
    MIN_MEMORY_REQUEST: "32Mi"
    CRITICAL_NAMESPACES: |
      openshift-monitoring
      openshift-ingress
      openshift-apiserver
      openshift-controller-manager
      openshift-sdn
    PROMETHEUS_URL: "https://prometheus-k8s.openshift-monitoring.svc.cluster.local:9091"
    LOG_LEVEL: "INFO"
    HOST: "0.0.0.0"
    PORT: "8080"
    WORKERS: "1"
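The template above is what scripts/deploy-s2i.sh processes, but it can also be driven by hand; a minimal sketch, assuming the file sits at the repository root and the default parameter values are acceptable:

    # Render the template with a few overrides and create the objects
    oc process -f openshift-s2i.yaml \
        -p NAME=oru-analyzer \
        -p NAMESPACE=resource-governance \
        -p GIT_REF=main \
        | oc apply -f -

    # Trigger (or re-run) the S2I build and follow its logs
    oc start-build oru-analyzer -n resource-governance --follow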
193
scripts/deploy-s2i.sh
Executable file
193
scripts/deploy-s2i.sh
Executable file
@@ -0,0 +1,193 @@
#!/bin/bash
# Deploy ORU Analyzer using Source-to-Image (S2I)
# Alternative deployment method for OpenShift

set -e

echo "=== ORU Analyzer S2I Deployment Script ==="
echo "Deploying ORU Analyzer using Source-to-Image..."

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Default values
NAMESPACE="resource-governance"
APP_NAME="oru-analyzer"
GIT_REPO="https://github.com/andersonid/openshift-resource-governance.git"
GIT_REF="main"
PYTHON_VERSION="3.11"

# Function to print colored output
print_status() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

print_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Function to check if command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# Check prerequisites
check_prerequisites() {
    print_status "Checking prerequisites..."

    if ! command_exists oc; then
        print_error "OpenShift CLI (oc) is not installed or not in PATH"
        exit 1
    fi

    # Check if logged in to OpenShift
    if ! oc whoami >/dev/null 2>&1; then
        print_error "Not logged in to OpenShift. Please run 'oc login' first"
        exit 1
    fi

    print_success "Prerequisites check passed"
}

# Create namespace if it doesn't exist
create_namespace() {
    print_status "Creating namespace '$NAMESPACE' if it doesn't exist..."

    if oc get namespace "$NAMESPACE" >/dev/null 2>&1; then
        print_warning "Namespace '$NAMESPACE' already exists"
    else
        oc new-project "$NAMESPACE"
        print_success "Namespace '$NAMESPACE' created"
    fi
}

# Deploy using S2I template
deploy_s2i() {
    print_status "Deploying using S2I template..."

    # Process template with parameters
    oc process -f openshift-s2i.yaml \
        -p NAME="$APP_NAME" \
        -p NAMESPACE="$NAMESPACE" \
        -p GIT_REPOSITORY="$GIT_REPO" \
        -p GIT_REF="$GIT_REF" \
        -p PYTHON_VERSION="$PYTHON_VERSION" \
        -p CPU_REQUEST="100m" \
        -p CPU_LIMIT="500m" \
        -p MEMORY_REQUEST="256Mi" \
        -p MEMORY_LIMIT="1Gi" \
        -p REPLICAS="1" \
        | oc apply -f -

    print_success "S2I template applied successfully"
}

# Wait for build to complete
wait_for_build() {
    print_status "Waiting for build to complete..."

    # Wait for build to start
    print_status "Waiting for build to start..."
    oc wait --for=condition=Running buildconfig/"$APP_NAME" --timeout=60s || true

    # Get the latest build
    BUILD_NAME=$(oc get builds -l buildconfig="$APP_NAME" --sort-by=.metadata.creationTimestamp -o jsonpath='{.items[-1].metadata.name}')

    if [ -n "$BUILD_NAME" ]; then
        print_status "Waiting for build '$BUILD_NAME' to complete..."
        oc logs -f build/"$BUILD_NAME" || true

        # Wait for build to complete
        oc wait --for=condition=Complete build/"$BUILD_NAME" --timeout=600s || {
            print_error "Build failed or timed out"
            print_status "Build logs:"
            oc logs build/"$BUILD_NAME"
            exit 1
        }

        print_success "Build completed successfully"
    else
        print_warning "No build found, continuing..."
    fi
}

# Wait for deployment to be ready
wait_for_deployment() {
    print_status "Waiting for deployment to be ready..."

    # Wait for deployment to complete
    oc rollout status deploymentconfig/"$APP_NAME" --timeout=300s || {
        print_error "Deployment failed or timed out"
        print_status "Deployment logs:"
        oc logs deploymentconfig/"$APP_NAME"
        exit 1
    }

    print_success "Deployment completed successfully"
}

# Get application URL
get_application_url() {
    print_status "Getting application URL..."

    ROUTE_URL=$(oc get route "$APP_NAME" -o jsonpath='{.spec.host}' 2>/dev/null || echo "")

    if [ -n "$ROUTE_URL" ]; then
        print_success "Application deployed successfully!"
        echo ""
        echo "=========================================="
        echo "🚀 ORU Analyzer is now available at:"
        echo "   https://$ROUTE_URL"
        echo "=========================================="
        echo ""
        echo "📊 To check the application status:"
        echo "   oc get pods -n $NAMESPACE"
        echo "   oc logs -f deploymentconfig/$APP_NAME -n $NAMESPACE"
        echo ""
        echo "🔧 To check the build status:"
        echo "   oc get builds -n $NAMESPACE"
        echo "   oc logs build/<build-name> -n $NAMESPACE"
        echo ""
    else
        print_warning "Could not determine application URL"
        print_status "Check the route manually:"
        echo "   oc get route -n $NAMESPACE"
    fi
}

# Main deployment function
main() {
    echo "Starting ORU Analyzer S2I deployment..."
    echo "=========================================="
    echo "Namespace: $NAMESPACE"
    echo "App Name: $APP_NAME"
    echo "Git Repository: $GIT_REPO"
    echo "Git Reference: $GIT_REF"
    echo "Python Version: $PYTHON_VERSION"
    echo "=========================================="
    echo ""

    check_prerequisites
    create_namespace
    deploy_s2i
    wait_for_build
    wait_for_deployment
    get_application_url

    print_success "ORU Analyzer S2I deployment completed!"
}

# Run main function
main "$@"
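A minimal usage sketch for the script above, assuming you are already logged in with oc and run it from the repository root (openshift-s2i.yaml is resolved relative to the current directory):

    chmod +x scripts/deploy-s2i.sh
    ./scripts/deploy-s2i.sh

The defaults at the top of the script (namespace, Git repository, branch, Python version) are plain shell variables, so they can be edited in place before running if a different target is needed.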
@@ -44,9 +44,9 @@ oc delete -f k8s/route.yaml --ignore-not-found=true
 echo -e "${YELLOW}   🌐 Removing Service...${NC}"
 oc delete -f k8s/service.yaml --ignore-not-found=true
 
-# Remove DaemonSet
-echo -e "${YELLOW}   📦 Removing DaemonSet...${NC}"
-oc delete -f k8s/daemonset.yaml --ignore-not-found=true
+# Remove Deployment
+echo -e "${YELLOW}   📦 Removing Deployment...${NC}"
+oc delete -f k8s/deployment.yaml --ignore-not-found=true
 
 # Wait for pods to be removed
 echo -e "${YELLOW}   ⏳ Waiting for pods to be removed...${NC}"