diff --git a/app/api/routes.py b/app/api/routes.py
index a27ec35..f325795 100644
--- a/app/api/routes.py
+++ b/app/api/routes.py
@@ -17,19 +17,19 @@ from app.services.historical_analysis import HistoricalAnalysisService
logger = logging.getLogger(__name__)
-# Criar router
+# Create router
api_router = APIRouter()
-# Inicializar serviços
+# Initialize services
validation_service = ValidationService()
report_service = ReportService()
def get_k8s_client(request: Request):
- """Dependency para obter cliente Kubernetes"""
+ """Dependency to get Kubernetes client"""
return request.app.state.k8s_client
def get_prometheus_client(request: Request):
- """Dependency para obter cliente Prometheus"""
+ """Dependency to get Prometheus client"""
return request.app.state.prometheus_client
@api_router.get("/cluster/status")
@@ -39,17 +39,17 @@ async def get_cluster_status(
):
"""Get overall cluster status"""
try:
- # Coletar dados básicos
+ # Collect basic data
pods = await k8s_client.get_all_pods()
nodes_info = await k8s_client.get_nodes_info()
- # Validar recursos
+ # Validate resources
all_validations = []
for pod in pods:
pod_validations = validation_service.validate_pod_resources(pod)
all_validations.extend(pod_validations)
- # Obter informações de overcommit
+ # Get overcommit information
overcommit_info = await prometheus_client.get_cluster_overcommit()
# Get VPA recommendations
@@ -78,19 +78,19 @@ async def get_namespace_status(
):
"""Get status of a specific namespace"""
try:
- # Coletar dados do namespace
+ # Collect namespace data
namespace_resources = await k8s_client.get_namespace_resources(namespace)
- # Validar recursos
+ # Validate resources
all_validations = []
for pod in namespace_resources.pods:
pod_validations = validation_service.validate_pod_resources(pod)
all_validations.extend(pod_validations)
- # Obter uso de recursos do Prometheus
+ # Get resource usage from Prometheus
resource_usage = await prometheus_client.get_namespace_resource_usage(namespace)
- # Generate report do namespace
+ # Generate namespace report
report = report_service.generate_namespace_report(
namespace=namespace,
pods=namespace_resources.pods,
@@ -131,26 +131,26 @@ async def get_validations(
):
"""List resource validations with pagination"""
try:
- # Coletar pods
+ # Collect pods
if namespace:
namespace_resources = await k8s_client.get_namespace_resources(namespace)
pods = namespace_resources.pods
else:
pods = await k8s_client.get_all_pods()
- # Validar recursos
+ # Validate resources
all_validations = []
for pod in pods:
pod_validations = validation_service.validate_pod_resources(pod)
all_validations.extend(pod_validations)
- # Filtrar por severidade se especificado
+ # Filter by severity if specified
if severity:
all_validations = [
v for v in all_validations if v.severity == severity
]
- # Paginação
+ # Pagination
total = len(all_validations)
start = (page - 1) * page_size
end = start + page_size
@@ -180,10 +180,10 @@ async def get_validations_by_namespace(
):
"""List validations grouped by namespace with pagination"""
try:
- # Coletar todos os pods com filtro de namespaces do sistema
+ # Collect all pods with system namespace filter
pods = await k8s_client.get_all_pods(include_system_namespaces=include_system_namespaces)
- # Validar recursos e agrupar por namespace
+ # Validate resources and group by namespace
namespace_validations = {}
for pod in pods:
pod_validations = validation_service.validate_pod_resources(pod)
@@ -203,14 +203,14 @@ async def get_validations_by_namespace(
"validations": []
}
- # Filtrar por severidade se especificado
+ # Filter by severity if specified
if severity:
pod_validations = [v for v in pod_validations if v.severity == severity]
namespace_validations[pod.namespace]["pods"][pod.name]["validations"] = pod_validations
namespace_validations[pod.namespace]["total_validations"] += len(pod_validations)
- # Contar severidades
+ # Count severities
for validation in pod_validations:
namespace_validations[pod.namespace]["severity_breakdown"][validation.severity] += 1
@@ -218,7 +218,7 @@ async def get_validations_by_namespace(
namespace_list = list(namespace_validations.values())
namespace_list.sort(key=lambda x: x["total_validations"], reverse=True)
- # Paginação
+ # Pagination
total = len(namespace_list)
start = (page - 1) * page_size
end = start + page_size
@@ -270,17 +270,17 @@ async def export_report(
pods = await k8s_client.get_all_pods()
nodes_info = await k8s_client.get_nodes_info()
- # Filtrar por namespaces se especificado
+ # Filter by namespaces if specified
if export_request.namespaces:
pods = [p for p in pods if p.namespace in export_request.namespaces]
- # Validar recursos
+ # Validate resources
all_validations = []
for pod in pods:
pod_validations = validation_service.validate_pod_resources(pod)
all_validations.extend(pod_validations)
- # Obter informações adicionais
+ # Get additional information
overcommit_info = {}
vpa_recommendations = []
@@ -299,7 +299,7 @@ async def export_report(
nodes_info=nodes_info
)
- # Exportar
+ # Export
filepath = await report_service.export_report(report, export_request)
return {
@@ -331,7 +331,7 @@ async def download_exported_file(filename: str):
file_info = next((f for f in files if f["filename"] == filename), None)
if not file_info:
- raise HTTPException(status_code=404, detail="Arquivo não encontrado")
+ raise HTTPException(status_code=404, detail="File not found")
return FileResponse(
path=file_info["filepath"],
@@ -350,18 +350,18 @@ async def apply_recommendation(
):
"""Apply resource recommendation"""
try:
- # TODO: Implementar aplicação de recomendações
- # Por enquanto, apenas simular
+ # TODO: Implement recommendation application
+ # For now, just simulate
if recommendation.dry_run:
return {
- "message": "Dry run - recomendação seria aplicada",
+ "message": "Dry run - recommendation would be applied",
"pod": recommendation.pod_name,
"namespace": recommendation.namespace,
"container": recommendation.container_name,
"action": f"{recommendation.action} {recommendation.resource_type} = {recommendation.value}"
}
else:
- # Implementar aplicação real da recomendação
+ # Implement real recommendation application
raise HTTPException(status_code=501, detail="Recommendation application not implemented yet")
except Exception as e:
@@ -378,14 +378,14 @@ async def get_historical_validations(
try:
validation_service = ValidationService()
- # Coletar pods
+ # Collect pods
if namespace:
namespace_resources = await k8s_client.get_namespace_resources(namespace)
pods = namespace_resources.pods
else:
pods = await k8s_client.get_all_pods()
- # Validar com análise histórica
+ # Validate with historical analysis
all_validations = []
for pod in pods:
pod_validations = await validation_service.validate_pod_resources_with_historical_analysis(
diff --git a/app/core/config.py b/app/core/config.py
index a96ab41..97eb6f1 100644
--- a/app/core/config.py
+++ b/app/core/config.py
@@ -18,12 +18,12 @@ class Settings(BaseSettings):
prometheus_url: str = "http://prometheus.openshift-monitoring.svc.cluster.local:9090"
# Validation settings
- cpu_limit_ratio: float = 3.0 # Ratio padrão limit:request para CPU
- memory_limit_ratio: float = 3.0 # Ratio padrão limit:request para memória
- min_cpu_request: str = "10m" # Mínimo de CPU request
- min_memory_request: str = "32Mi" # Mínimo de memória request
+ cpu_limit_ratio: float = 3.0 # Default limit:request ratio for CPU
+ memory_limit_ratio: float = 3.0 # Default limit:request ratio for memory
+ min_cpu_request: str = "10m" # Minimum CPU request
+ min_memory_request: str = "32Mi" # Minimum memory request
- # Namespaces críticos para VPA
+ # Critical namespaces for VPA
critical_namespaces: List[str] = [
"openshift-monitoring",
"openshift-ingress",
diff --git a/app/core/kubernetes_client.py b/app/core/kubernetes_client.py
index 6876ade..5631685 100644
--- a/app/core/kubernetes_client.py
+++ b/app/core/kubernetes_client.py
@@ -1,5 +1,5 @@
"""
-Cliente Kubernetes/OpenShift para coleta de dados
+Kubernetes/OpenShift client for data collection
"""
import logging
from typing import List, Dict, Any, Optional
@@ -14,7 +14,7 @@ from app.models.resource_models import PodResource, NamespaceResources, VPARecom
logger = logging.getLogger(__name__)
class K8sClient:
- """Cliente para interação com Kubernetes/OpenShift"""
+ """Client for interaction with Kubernetes/OpenShift"""
def __init__(self):
self.v1 = None
@@ -23,16 +23,16 @@ class K8sClient:
self.initialized = False
async def initialize(self):
- """Inicializar cliente Kubernetes"""
+ """Initialize Kubernetes client"""
try:
- # Tentar carregar configuração do cluster
+ # Try to load cluster configuration
if settings.kubeconfig_path:
config.load_kube_config(config_file=settings.kubeconfig_path)
else:
- # Usar configuração in-cluster
+ # Use in-cluster configuration
config.load_incluster_config()
- # Inicializar clientes da API
+ # Initialize API clients
self.v1 = client.CoreV1Api()
self.autoscaling_v1 = client.AutoscalingV1Api()
self.apps_v1 = client.AppsV1Api()
@@ -45,8 +45,8 @@ class K8sClient:
raise
def _is_system_namespace(self, namespace: str, include_system: bool = None) -> bool:
- """Verificar se um namespace é do sistema"""
- # Usar parâmetro se fornecido, senão usar configuração global
+ """Check if a namespace is a system namespace"""
+ # Use parameter if provided, otherwise use global configuration
should_include = include_system if include_system is not None else settings.include_system_namespaces
if should_include:
@@ -58,18 +58,18 @@ class K8sClient:
return False
async def get_all_pods(self, include_system_namespaces: bool = None) -> List[PodResource]:
- """Coletar informações de todos os pods do cluster"""
+ """Collect information from all pods in the cluster"""
if not self.initialized:
raise RuntimeError("Kubernetes client not initialized")
pods_data = []
try:
- # Listar todos os pods em todos os namespaces
+ # List all pods in all namespaces
pods = self.v1.list_pod_for_all_namespaces(watch=False)
for pod in pods.items:
- # Filtrar namespaces do sistema
+ # Filter system namespaces
if self._is_system_namespace(pod.metadata.namespace, include_system_namespaces):
continue
pod_resource = PodResource(
@@ -80,7 +80,7 @@ class K8sClient:
containers=[]
)
- # Processar containers do pod
+ # Process pod containers
for container in pod.spec.containers:
container_resource = {
"name": container.name,
@@ -91,7 +91,7 @@ class K8sClient:
}
}
- # Extrair requests e limits
+ # Extract requests and limits
if container.resources:
if container.resources.requests:
container_resource["resources"]["requests"] = {
@@ -106,7 +106,7 @@ class K8sClient:
pods_data.append(pod_resource)
- logger.info(f"Coletados {len(pods_data)} pods")
+ logger.info(f"Collected {len(pods_data)} pods")
return pods_data
except ApiException as e:
@@ -114,13 +114,13 @@ class K8sClient:
raise
async def get_namespace_resources(self, namespace: str) -> NamespaceResources:
- """Coletar recursos de um namespace específico"""
+ """Collect resources from a specific namespace"""
if not self.initialized:
raise RuntimeError("Kubernetes client not initialized")
- # Verificar se é namespace do sistema
+ # Check if it's a system namespace
if self._is_system_namespace(namespace):
- logger.info(f"Namespace {namespace} é do sistema, retornando vazio")
+            logger.info(f"Namespace {namespace} is a system namespace, returning empty")
return NamespaceResources(
name=namespace,
pods=[],
@@ -131,7 +131,7 @@ class K8sClient:
)
try:
- # Listar pods do namespace
+ # List namespace pods
pods = self.v1.list_namespaced_pod(namespace=namespace)
namespace_resource = NamespaceResources(
@@ -183,28 +183,28 @@ class K8sClient:
raise
async def get_vpa_recommendations(self) -> List[VPARecommendation]:
- """Coletar recomendações do VPA"""
+ """Collect VPA recommendations"""
if not self.initialized:
raise RuntimeError("Kubernetes client not initialized")
recommendations = []
try:
- # VPA não está disponível na API padrão do Kubernetes
- # TODO: Implementar usando Custom Resource Definition (CRD)
- logger.warning("VPA não está disponível na API padrão do Kubernetes")
+ # VPA is not available in the standard Kubernetes API
+ # TODO: Implement using Custom Resource Definition (CRD)
+ logger.warning("VPA is not available in the standard Kubernetes API")
return []
- logger.info(f"Coletadas {len(recommendations)} recomendações VPA")
+ logger.info(f"Collected {len(recommendations)} VPA recommendations")
return recommendations
except ApiException as e:
logger.error(f"Error collecting VPA recommendations: {e}")
- # VPA pode não estar instalado, retornar lista vazia
+ # VPA may not be installed, return empty list
return []
async def get_nodes_info(self) -> List[Dict[str, Any]]:
- """Coletar informações dos nós do cluster"""
+ """Collect cluster node information"""
if not self.initialized:
raise RuntimeError("Kubernetes client not initialized")
@@ -221,19 +221,19 @@ class K8sClient:
"conditions": []
}
- # Capacidade do nó
+ # Node capacity
if node.status.capacity:
node_info["capacity"] = {
k: v for k, v in node.status.capacity.items()
}
- # Recursos alocáveis
+ # Allocatable resources
if node.status.allocatable:
node_info["allocatable"] = {
k: v for k, v in node.status.allocatable.items()
}
- # Condições do nó
+ # Node conditions
if node.status.conditions:
node_info["conditions"] = [
{
diff --git a/app/core/prometheus_client.py b/app/core/prometheus_client.py
index d42a0be..fdd0ff2 100644
--- a/app/core/prometheus_client.py
+++ b/app/core/prometheus_client.py
@@ -1,5 +1,5 @@
"""
-Cliente Prometheus para coleta de métricas
+Prometheus client for metrics collection
"""
import logging
import aiohttp
@@ -12,7 +12,7 @@ from app.core.config import settings
logger = logging.getLogger(__name__)
class PrometheusClient:
- """Cliente para interação com Prometheus"""
+ """Client for Prometheus interaction"""
def __init__(self):
self.base_url = settings.prometheus_url
@@ -20,25 +20,25 @@ class PrometheusClient:
self.initialized = False
async def initialize(self):
- """Inicializar cliente Prometheus"""
+ """Initialize Prometheus client"""
try:
self.session = aiohttp.ClientSession()
- # Testar conexão
+ # Test connection
async with self.session.get(f"{self.base_url}/api/v1/query?query=up") as response:
if response.status == 200:
self.initialized = True
logger.info("Prometheus client initialized successfully")
else:
- logger.warning(f"Prometheus retornou status {response.status}")
+ logger.warning(f"Prometheus returned status {response.status}")
except Exception as e:
logger.error(f"Error initializing Prometheus client: {e}")
- # Prometheus pode não estar disponível, continuar sem ele
+ # Prometheus may not be available, continue without it
self.initialized = False
async def query(self, query: str, time: Optional[datetime] = None) -> Dict[str, Any]:
- """Executar query no Prometheus"""
+        """Execute a query against Prometheus"""
if not self.initialized or not self.session:
return {"status": "error", "message": "Prometheus not available"}
@@ -63,17 +63,17 @@ class PrometheusClient:
return {"status": "error", "message": str(e)}
async def get_pod_cpu_usage(self, namespace: str, pod_name: str) -> Dict[str, Any]:
- """Obter uso de CPU de um pod específico"""
+ """Get CPU usage for a specific pod"""
query = f'rate(container_cpu_usage_seconds_total{{namespace="{namespace}", pod="{pod_name}"}}[5m])'
return await self.query(query)
async def get_pod_memory_usage(self, namespace: str, pod_name: str) -> Dict[str, Any]:
- """Obter uso de memória de um pod específico"""
+ """Get memory usage for a specific pod"""
query = f'container_memory_working_set_bytes{{namespace="{namespace}", pod="{pod_name}"}}'
return await self.query(query)
async def get_namespace_resource_usage(self, namespace: str) -> Dict[str, Any]:
- """Obter uso de recursos de um namespace"""
+ """Get resource usage of a namespace"""
cpu_query = f'sum(rate(container_cpu_usage_seconds_total{{namespace="{namespace}"}}[5m]))'
memory_query = f'sum(container_memory_working_set_bytes{{namespace="{namespace}"}})'
@@ -86,7 +86,7 @@ class PrometheusClient:
}
async def get_cluster_overcommit(self) -> Dict[str, Any]:
- """Verificar overcommit no cluster"""
+        """Check for overcommit in the cluster"""
# CPU overcommit
cpu_capacity_query = 'sum(kube_node_status_capacity{resource="cpu"})'
cpu_requests_query = 'sum(kube_pod_container_resource_requests{resource="cpu"})'
@@ -112,7 +112,7 @@ class PrometheusClient:
}
async def get_node_resource_usage(self) -> List[Dict[str, Any]]:
- """Obter uso de recursos por nó"""
+ """Get resource usage by node"""
query = '''
(
kube_node_status_capacity{resource="cpu"} or
@@ -126,6 +126,6 @@ class PrometheusClient:
return result
async def close(self):
- """Fechar sessão HTTP"""
+ """Close HTTP session"""
if self.session:
await self.session.close()
diff --git a/app/main.py b/app/main.py
index f03e0ee..a049a66 100644
--- a/app/main.py
+++ b/app/main.py
@@ -1,6 +1,6 @@
"""
OpenShift Resource Governance Tool
-Aplicação para governança de recursos no cluster OpenShift
+Application for resource governance in OpenShift cluster
"""
import os
import logging
@@ -14,7 +14,7 @@ from app.api.routes import api_router
from app.core.kubernetes_client import K8sClient
from app.core.prometheus_client import PrometheusClient
-# Configuração de logging
+# Logging configuration
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
@@ -23,10 +23,10 @@ logger = logging.getLogger(__name__)
@asynccontextmanager
async def lifespan(app: FastAPI):
- """Inicialização e cleanup da aplicação"""
- logger.info("Iniciando OpenShift Resource Governance Tool")
+ """Application initialization and cleanup"""
+ logger.info("Starting OpenShift Resource Governance Tool")
- # Inicializar clientes
+ # Initialize clients
app.state.k8s_client = K8sClient()
app.state.prometheus_client = PrometheusClient()
@@ -40,25 +40,25 @@ async def lifespan(app: FastAPI):
yield
- logger.info("Finalizando aplicação")
+ logger.info("Shutting down application")
-# Criar aplicação FastAPI
+# Create FastAPI application
app = FastAPI(
title="OpenShift Resource Governance Tool",
- description="Ferramenta de governança de recursos para clusters OpenShift",
+ description="Resource governance tool for OpenShift clusters",
version="1.0.0",
lifespan=lifespan
)
-# Incluir rotas da API
+# Include API routes
app.include_router(api_router, prefix="/api/v1")
-# Servir arquivos estáticos
+# Serve static files
app.mount("/static", StaticFiles(directory="app/static"), name="static")
@app.get("/", response_class=HTMLResponse)
async def root():
- """Página principal da aplicação"""
+ """Main application page"""
with open("app/static/index.html", "r") as f:
return HTMLResponse(content=f.read())
diff --git a/app/models/resource_models.py b/app/models/resource_models.py
index 74a2b1a..5e005b9 100644
--- a/app/models/resource_models.py
+++ b/app/models/resource_models.py
@@ -1,17 +1,17 @@
"""
-Modelos de dados para recursos Kubernetes
+Data models for Kubernetes resources
"""
from typing import List, Dict, Any, Optional
from pydantic import BaseModel
class ContainerResource(BaseModel):
- """Recursos de um container"""
+ """Container resources"""
name: str
image: str
resources: Dict[str, Dict[str, str]]
class PodResource(BaseModel):
- """Recursos de um pod"""
+ """Pod resources"""
name: str
namespace: str
node_name: Optional[str] = None
@@ -19,7 +19,7 @@ class PodResource(BaseModel):
containers: List[ContainerResource]
class NamespaceResources(BaseModel):
- """Recursos de um namespace"""
+ """Namespace resources"""
name: str
pods: List[PodResource]
total_cpu_requests: str = "0"
@@ -28,14 +28,14 @@ class NamespaceResources(BaseModel):
total_memory_limits: str = "0"
class VPARecommendation(BaseModel):
- """Recomendação do VPA"""
+ """VPA recommendation"""
name: str
namespace: str
target_ref: Dict[str, str]
recommendations: Dict[str, Any]
class ResourceValidation(BaseModel):
- """Resultado de validação de recursos"""
+ """Resource validation result"""
pod_name: str
namespace: str
container_name: str
@@ -72,7 +72,7 @@ class ExportRequest(BaseModel):
include_validations: bool = True
class ApplyRecommendationRequest(BaseModel):
- """Request para aplicar recomendação"""
+ """Request to apply recommendation"""
pod_name: str
namespace: str
container_name: str
diff --git a/app/services/historical_analysis.py b/app/services/historical_analysis.py
index c610bdf..6df141c 100644
--- a/app/services/historical_analysis.py
+++ b/app/services/historical_analysis.py
@@ -1,5 +1,5 @@
"""
-Serviço de análise histórica usando métricas do Prometheus
+Historical analysis service using Prometheus metrics
"""
import logging
import asyncio
@@ -14,16 +14,16 @@ from app.core.config import settings
logger = logging.getLogger(__name__)
class HistoricalAnalysisService:
- """Serviço para análise histórica de recursos usando Prometheus"""
+ """Service for historical resource analysis using Prometheus"""
def __init__(self):
self.prometheus_url = settings.prometheus_url
self.time_ranges = {
- '1h': 3600, # 1 hora
- '6h': 21600, # 6 horas
- '24h': 86400, # 24 horas
- '7d': 604800, # 7 dias
- '30d': 2592000 # 30 dias
+ '1h': 3600, # 1 hour
+ '6h': 21600, # 6 hours
+ '24h': 86400, # 24 hours
+ '7d': 604800, # 7 days
+ '30d': 2592000 # 30 days
}
async def analyze_pod_historical_usage(
@@ -31,7 +31,7 @@ class HistoricalAnalysisService:
pod: PodResource,
time_range: str = '24h'
) -> List[ResourceValidation]:
- """Analisar uso histórico de um pod"""
+ """Analyze historical usage of a pod"""
validations = []
if time_range not in self.time_ranges:
@@ -41,13 +41,13 @@ class HistoricalAnalysisService:
start_time = end_time - timedelta(seconds=self.time_ranges[time_range])
try:
- # Analisar CPU
+ # Analyze CPU
cpu_analysis = await self._analyze_cpu_usage(
pod, start_time, end_time, time_range
)
validations.extend(cpu_analysis)
- # Analisar memória
+ # Analyze memory
memory_analysis = await self._analyze_memory_usage(
pod, start_time, end_time, time_range
)
@@ -74,14 +74,14 @@ class HistoricalAnalysisService:
end_time: datetime,
time_range: str
) -> List[ResourceValidation]:
- """Analisar uso histórico de CPU"""
+ """Analyze historical CPU usage"""
validations = []
for container in pod.containers:
container_name = container["name"]
try:
- # Query para CPU usage rate
+ # Query for CPU usage rate
cpu_query = f'''
rate(container_cpu_usage_seconds_total{{
pod="{pod.name}",
@@ -92,7 +92,7 @@ class HistoricalAnalysisService:
}}[{time_range}])
'''
- # Query para CPU requests
+ # Query for CPU requests
cpu_requests_query = f'''
kube_pod_container_resource_requests{{
pod="{pod.name}",
@@ -101,7 +101,7 @@ class HistoricalAnalysisService:
}}
'''
- # Query para CPU limits
+ # Query for CPU limits
cpu_limits_query = f'''
kube_pod_container_resource_limits{{
pod="{pod.name}",
@@ -110,7 +110,7 @@ class HistoricalAnalysisService:
}}
'''
- # Executar queries
+ # Execute queries
cpu_usage = await self._query_prometheus(cpu_query, start_time, end_time)
cpu_requests = await self._query_prometheus(cpu_requests_query, start_time, end_time)
cpu_limits = await self._query_prometheus(cpu_limits_query, start_time, end_time)
@@ -134,14 +134,14 @@ class HistoricalAnalysisService:
end_time: datetime,
time_range: str
) -> List[ResourceValidation]:
- """Analisar uso histórico de memória"""
+ """Analyze historical memory usage"""
validations = []
for container in pod.containers:
container_name = container["name"]
try:
- # Query para memória usage
+ # Query for memory usage
memory_query = f'''
container_memory_working_set_bytes{{
pod="{pod.name}",
@@ -152,7 +152,7 @@ class HistoricalAnalysisService:
}}
'''
- # Query para memória requests
+ # Query for memory requests
memory_requests_query = f'''
kube_pod_container_resource_requests{{
pod="{pod.name}",
@@ -161,7 +161,7 @@ class HistoricalAnalysisService:
}}
'''
- # Query para memória limits
+ # Query for memory limits
memory_limits_query = f'''
kube_pod_container_resource_limits{{
pod="{pod.name}",
@@ -170,7 +170,7 @@ class HistoricalAnalysisService:
}}
'''
- # Executar queries
+ # Execute queries
memory_usage = await self._query_prometheus(memory_query, start_time, end_time)
memory_requests = await self._query_prometheus(memory_requests_query, start_time, end_time)
memory_limits = await self._query_prometheus(memory_limits_query, start_time, end_time)
@@ -197,22 +197,22 @@ class HistoricalAnalysisService:
limits_data: List[Dict],
time_range: str
) -> List[ResourceValidation]:
- """Analisar métricas de CPU"""
+ """Analyze CPU metrics"""
validations = []
if not usage_data or not requests_data:
return validations
- # Calcular estatísticas de uso
+ # Calculate usage statistics
usage_values = [float(point[1]) for point in usage_data if point[1] != 'NaN']
if not usage_values:
return validations
- # Valores atuais de requests/limits
+ # Current values of requests/limits
current_requests = float(requests_data[0][1]) if requests_data else 0
current_limits = float(limits_data[0][1]) if limits_data else 0
- # Estatísticas de uso
+ # Usage statistics
avg_usage = sum(usage_values) / len(usage_values)
max_usage = max(usage_values)
p95_usage = sorted(usage_values)[int(len(usage_values) * 0.95)]
@@ -282,28 +282,28 @@ class HistoricalAnalysisService:
limits_data: List[Dict],
time_range: str
) -> List[ResourceValidation]:
- """Analisar métricas de memória"""
+ """Analyze memory metrics"""
validations = []
if not usage_data or not requests_data:
return validations
- # Calcular estatísticas de uso
+ # Calculate usage statistics
usage_values = [float(point[1]) for point in usage_data if point[1] != 'NaN']
if not usage_values:
return validations
- # Valores atuais de requests/limits (em bytes)
+ # Current values of requests/limits (in bytes)
current_requests = float(requests_data[0][1]) if requests_data else 0
current_limits = float(limits_data[0][1]) if limits_data else 0
- # Estatísticas de uso
+ # Usage statistics
avg_usage = sum(usage_values) / len(usage_values)
max_usage = max(usage_values)
p95_usage = sorted(usage_values)[int(len(usage_values) * 0.95)]
p99_usage = sorted(usage_values)[int(len(usage_values) * 0.99)]
- # Converter para MiB para melhor legibilidade
+ # Convert to MiB for better readability
def bytes_to_mib(bytes_value):
return bytes_value / (1024 * 1024)
@@ -362,14 +362,14 @@ class HistoricalAnalysisService:
return validations
async def _query_prometheus(self, query: str, start_time: datetime, end_time: datetime) -> List[Dict]:
- """Executar query no Prometheus"""
+        """Execute a query against Prometheus"""
try:
async with aiohttp.ClientSession() as session:
params = {
'query': query,
'start': start_time.timestamp(),
'end': end_time.timestamp(),
- 'step': '60s' # 1 minuto de resolução
+ 'step': '60s' # 1 minute resolution
}
async with session.get(
@@ -389,9 +389,9 @@ class HistoricalAnalysisService:
return []
async def get_cluster_historical_summary(self, time_range: str = '24h') -> Dict[str, Any]:
- """Obter resumo histórico do cluster"""
+ """Get cluster historical summary"""
try:
- # Query para CPU total do cluster
+ # Query for total cluster CPU
cpu_query = f'''
sum(rate(container_cpu_usage_seconds_total{{
container!="POD",
@@ -399,7 +399,7 @@ class HistoricalAnalysisService:
}}[{time_range}]))
'''
- # Query para memória total do cluster
+ # Query for total cluster memory
memory_query = f'''
sum(container_memory_working_set_bytes{{
container!="POD",
@@ -407,7 +407,7 @@ class HistoricalAnalysisService:
}})
'''
- # Query para requests totais
+ # Query for total requests
cpu_requests_query = f'''
sum(kube_pod_container_resource_requests{{resource="cpu"}})
'''
@@ -416,7 +416,7 @@ class HistoricalAnalysisService:
sum(kube_pod_container_resource_requests{{resource="memory"}})
'''
- # Executar queries
+ # Execute queries
cpu_usage = await self._query_prometheus(cpu_query,
datetime.now() - timedelta(seconds=self.time_ranges[time_range]),
datetime.now())
diff --git a/app/services/report_service.py b/app/services/report_service.py
index bb35f3e..857c4c3 100644
--- a/app/services/report_service.py
+++ b/app/services/report_service.py
@@ -181,7 +181,7 @@ class ReportService:
filename = f"cluster_report_{timestamp}.json"
filepath = os.path.join(self.export_path, filename)
- # Converter para dict para serialização
+ # Convert to dict for serialization
report_dict = report.dict()
with open(filepath, 'w', encoding='utf-8') as f:
@@ -198,7 +198,7 @@ class ReportService:
with open(filepath, 'w', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
- # Cabeçalho
+ # Header
writer.writerow([
"Pod Name", "Namespace", "Container Name",
"Validation Type", "Severity", "Message", "Recommendation"
@@ -234,12 +234,12 @@ class ReportService:
styles = getSampleStyleSheet()
story = []
- # Título
+ # Title
title = Paragraph("OpenShift Resource Governance Report", styles['Title'])
story.append(title)
story.append(Spacer(1, 12))
- # Resumo
+ # Summary
summary_text = f"""
Cluster Summary:
Total Pods: {report.total_pods}
@@ -276,7 +276,7 @@ class ReportService:
('GRID', (0, 0), (-1, -1), 1, colors.black)
]))
- story.append(Paragraph("Validações:", styles['Heading2']))
+ story.append(Paragraph("Validations:", styles['Heading2']))
story.append(table)
doc.build(story)
diff --git a/app/services/validation_service.py b/app/services/validation_service.py
index 270bb4c..99c27bd 100644
--- a/app/services/validation_service.py
+++ b/app/services/validation_service.py
@@ -40,10 +40,10 @@ class ValidationService:
time_range: str = '24h'
) -> List[ResourceValidation]:
"""Validate pod resources including historical analysis"""
- # Validações estáticas
+ # Static validations
static_validations = self.validate_pod_resources(pod)
- # Análise histórica
+ # Historical analysis
try:
historical_validations = await self.historical_analysis.analyze_pod_historical_usage(
pod, time_range
@@ -66,7 +66,7 @@ class ValidationService:
requests = resources.get("requests", {})
limits = resources.get("limits", {})
- # 1. Verificar se requests estão definidos
+ # 1. Check if requests are defined
if not requests:
validations.append(ResourceValidation(
pod_name=pod_name,
@@ -78,7 +78,7 @@ class ValidationService:
recommendation="Define CPU and memory requests to guarantee QoS"
))
- # 2. Verificar se limits estão definidos
+ # 2. Check if limits are defined
if not limits:
validations.append(ResourceValidation(
pod_name=pod_name,
@@ -213,7 +213,7 @@ class ValidationService:
"""Validate minimum request values"""
validations = []
- # Validar CPU mínima
+ # Validate minimum CPU
if "cpu" in requests:
try:
request_value = self._parse_cpu_value(requests["cpu"])
@@ -232,7 +232,7 @@ class ValidationService:
except (ValueError, InvalidOperation):
pass
- # Validar memória mínima
+ # Validate minimum memory
if "memory" in requests:
try:
request_value = self._parse_memory_value(requests["memory"])
@@ -254,7 +254,7 @@ class ValidationService:
return validations
def _parse_cpu_value(self, value: str) -> float:
- """Converter valor de CPU para float (cores)"""
+ """Convert CPU value to float (cores)"""
if value.endswith('m'):
return float(value[:-1]) / 1000
elif value.endswith('n'):
@@ -263,7 +263,7 @@ class ValidationService:
return float(value)
def _parse_memory_value(self, value: str) -> int:
- """Converter valor de memória para bytes"""
+ """Convert memory value to bytes"""
value = value.upper()
if value.endswith('KI'):
@@ -289,15 +289,15 @@ class ValidationService:
"""Validate overcommit in a namespace"""
validations = []
- # Calcular total de requests do namespace
+ # Calculate total namespace requests
total_cpu_requests = self._parse_cpu_value(namespace_resources.total_cpu_requests)
total_memory_requests = self._parse_memory_value(namespace_resources.total_memory_requests)
- # Calcular capacidade total dos nós
+ # Calculate total node capacity
total_cpu_capacity = self._parse_cpu_value(node_capacity.get("cpu", "0"))
total_memory_capacity = self._parse_memory_value(node_capacity.get("memory", "0"))
- # Verificar overcommit de CPU
+ # Check CPU overcommit
if total_cpu_capacity > 0:
cpu_utilization = (total_cpu_requests / total_cpu_capacity) * 100
if cpu_utilization > 100:
@@ -311,7 +311,7 @@ class ValidationService:
recommendation="Reduce CPU requests or add more nodes to the cluster"
))
- # Verificar overcommit de memória
+ # Check memory overcommit
if total_memory_capacity > 0:
memory_utilization = (total_memory_requests / total_memory_capacity) * 100
if memory_utilization > 100:
@@ -331,7 +331,7 @@ class ValidationService:
"""Generate recommendations based on validations"""
recommendations = []
- # Agrupar validações por tipo
+ # Group validations by type
validation_counts = {}
for validation in validations:
validation_type = validation.validation_type
@@ -339,7 +339,7 @@ class ValidationService:
validation_counts[validation_type] = 0
validation_counts[validation_type] += 1
- # Gerar recomendações baseadas nos problemas encontrados
+ # Generate recommendations based on found issues
if validation_counts.get("missing_requests", 0) > 0:
recommendations.append(
f"Implement LimitRange in namespace to define default requests "
diff --git a/deploy-local.sh b/deploy-local.sh
index 77af9ae..8e41fba 100755
--- a/deploy-local.sh
+++ b/deploy-local.sh
@@ -1,66 +1,66 @@
#!/bin/bash
-# Script de deploy local para OpenShift
-# Uso: ./deploy-local.sh [TAG_DA_IMAGEM]
+# Local deployment script for OpenShift
+# Usage: ./deploy-local.sh [IMAGE_TAG]
set -e
-# Configurações
+# Configuration
IMAGE_NAME="resource-governance"
REGISTRY="andersonid"
NAMESPACE="resource-governance"
TAG=${1:-"latest"}
-echo "🚀 Deploy Local para OpenShift"
-echo "================================"
-echo "Imagem: $REGISTRY/$IMAGE_NAME:$TAG"
+echo "Local Deploy to OpenShift"
+echo "========================="
+echo "Image: $REGISTRY/$IMAGE_NAME:$TAG"
echo "Namespace: $NAMESPACE"
echo ""
-# Verificar se está logado no OpenShift
+# Check if logged into OpenShift
if ! oc whoami > /dev/null 2>&1; then
- echo "❌ Não está logado no OpenShift. Execute: oc login"
+ echo "ERROR: Not logged into OpenShift. Run: oc login"
exit 1
fi
-echo "✅ Logado no OpenShift como: $(oc whoami)"
+echo "SUCCESS: Logged into OpenShift as: $(oc whoami)"
echo ""
-# Aplicar manifests
-echo "📋 Aplicando manifests..."
+# Apply manifests
+echo "Applying manifests..."
oc apply -f k8s/namespace.yaml
oc apply -f k8s/rbac.yaml
oc apply -f k8s/configmap.yaml
-# Atualizar imagem do deployment
-echo "🔄 Atualizando imagem do deployment..."
+# Update deployment image
+echo "Updating deployment image..."
oc set image deployment/$IMAGE_NAME $IMAGE_NAME=$REGISTRY/$IMAGE_NAME:$TAG -n $NAMESPACE || true
-# Aplicar deployment, service e route
-echo "📦 Aplicando deployment, service e route..."
+# Apply deployment, service and route
+echo "Applying deployment, service and route..."
oc apply -f k8s/deployment.yaml
oc apply -f k8s/service.yaml
oc apply -f k8s/route.yaml
-# Aguardar rollout
-echo "⏳ Aguardando rollout..."
+# Wait for rollout
+echo "Waiting for rollout..."
oc rollout status deployment/$IMAGE_NAME -n $NAMESPACE --timeout=300s
-# Verificar deployment
-echo "✅ Verificando deployment..."
+# Verify deployment
+echo "Verifying deployment..."
oc get deployment $IMAGE_NAME -n $NAMESPACE
oc get pods -n $NAMESPACE -l app.kubernetes.io/name=$IMAGE_NAME
-# Obter URL da rota
+# Get route URL
ROUTE_URL=$(oc get route $IMAGE_NAME-route -n $NAMESPACE -o jsonpath='{.spec.host}' 2>/dev/null || echo "")
if [ -n "$ROUTE_URL" ]; then
echo ""
- echo "🚀 Application deployed successfully!"
- echo "🌐 URL: https://$ROUTE_URL"
- echo "📊 Status: oc get pods -n $NAMESPACE -l app.kubernetes.io/name=$IMAGE_NAME"
+ echo "Application deployed successfully!"
+ echo "URL: https://$ROUTE_URL"
+ echo "Status: oc get pods -n $NAMESPACE -l app.kubernetes.io/name=$IMAGE_NAME"
else
- echo "⚠️ Rota não encontrada. Verifique: oc get routes -n $NAMESPACE"
+ echo "WARNING: Route not found. Check: oc get routes -n $NAMESPACE"
fi
echo ""
-echo "✅ Deploy concluído!"
+echo "Deploy completed!"
diff --git a/deploy-to-cluster.sh b/deploy-to-cluster.sh
index 04e6bbb..9aacf2c 100755
--- a/deploy-to-cluster.sh
+++ b/deploy-to-cluster.sh
@@ -1,82 +1,82 @@
#!/bin/bash
-# Script para deploy da aplicação OpenShift Resource Governance
-# Funciona com qualquer cluster OpenShift (público ou privado)
+# Script for deploying OpenShift Resource Governance application
+# Works with any OpenShift cluster (public or private)
-# Variáveis
+# Variables
IMAGE_NAME="resource-governance"
NAMESPACE="resource-governance"
-IMAGE_TAG=${1:-latest} # Usa o primeiro argumento como tag, ou 'latest' por padrão
+IMAGE_TAG=${1:-latest} # Use first argument as tag, or 'latest' by default
-echo "🚀 Deploy para OpenShift Cluster"
-echo "================================"
-echo "Imagem: ${IMAGE_TAG}"
+echo "Deploy to OpenShift Cluster"
+echo "==========================="
+echo "Image: ${IMAGE_TAG}"
echo "Namespace: ${NAMESPACE}"
echo ""
-# 1. Verificar login no OpenShift
+# 1. Check OpenShift login
if ! oc whoami > /dev/null 2>&1; then
- echo "❌ Não logado no OpenShift. Por favor, faça login com 'oc login'."
- echo "💡 Exemplo: oc login https://your-cluster.com"
+    echo "ERROR: Not logged into OpenShift. Please log in with 'oc login'."
+ echo "Example: oc login https://your-cluster.com"
exit 1
fi
-echo "✅ Logado no OpenShift como: $(oc whoami)"
+echo "SUCCESS: Logged into OpenShift as: $(oc whoami)"
echo ""
-# 2. Verificar se o namespace existe, senão criar
+# 2. Check if namespace exists, create if not
if ! oc get namespace ${NAMESPACE} > /dev/null 2>&1; then
- echo "📋 Criando namespace ${NAMESPACE}..."
+ echo "Creating namespace ${NAMESPACE}..."
oc create namespace ${NAMESPACE}
else
- echo "✅ Namespace ${NAMESPACE} já existe"
+ echo "SUCCESS: Namespace ${NAMESPACE} already exists"
fi
echo ""
-# 3. Aplicar manifests básicos (rbac, configmap)
-echo "📋 Aplicando manifests..."
+# 3. Apply basic manifests (rbac, configmap)
+echo "Applying manifests..."
oc apply -f k8s/rbac.yaml
oc apply -f k8s/configmap.yaml
echo ""
-# 4. Atualizar deployment com a nova imagem
-echo "🔄 Atualizando imagem do deployment..."
+# 4. Update deployment with new image
+echo "Updating deployment image..."
oc set image deployment/${IMAGE_NAME} ${IMAGE_NAME}=${IMAGE_TAG} -n ${NAMESPACE} || true
echo ""
-# 5. Aplicar deployment, service e route
-echo "📦 Aplicando deployment, service e route..."
+# 5. Apply deployment, service and route
+echo "Applying deployment, service and route..."
oc apply -f k8s/deployment.yaml
oc apply -f k8s/service.yaml
oc apply -f k8s/route.yaml
echo ""
-# 6. Aguardar rollout
-echo "⏳ Aguardando rollout..."
+# 6. Wait for rollout
+echo "Waiting for rollout..."
oc rollout status deployment/${IMAGE_NAME} -n ${NAMESPACE} --timeout=300s
-echo "✅ Rollout concluído com sucesso!"
+echo "SUCCESS: Rollout completed successfully!"
echo ""
-# 7. Verificar deployment
-echo "✅ Verificando deployment..."
+# 7. Verify deployment
+echo "Verifying deployment..."
oc get deployment ${IMAGE_NAME} -n ${NAMESPACE}
oc get pods -n ${NAMESPACE} -l app.kubernetes.io/name=${IMAGE_NAME}
echo ""
-# 8. Obter URL da rota
+# 8. Get route URL
ROUTE_URL=$(oc get route ${IMAGE_NAME}-route -n ${NAMESPACE} -o jsonpath='{.spec.host}' 2>/dev/null || echo "")
if [ -n "$ROUTE_URL" ]; then
- echo "🚀 Application deployed successfully!"
- echo "🌐 URL: https://$ROUTE_URL"
- echo "📊 Status: oc get pods -n ${NAMESPACE} -l app.kubernetes.io/name=${IMAGE_NAME}"
+ echo "Application deployed successfully!"
+ echo "URL: https://$ROUTE_URL"
+ echo "Status: oc get pods -n ${NAMESPACE} -l app.kubernetes.io/name=${IMAGE_NAME}"
else
- echo "⚠️ Rota não encontrada. Verifique se o cluster suporta Routes."
- echo "💡 Para acessar localmente: oc port-forward service/${IMAGE_NAME}-service 8080:8080 -n ${NAMESPACE}"
+ echo "WARNING: Route not found. Check if cluster supports Routes."
+ echo "For local access: oc port-forward service/${IMAGE_NAME}-service 8080:8080 -n ${NAMESPACE}"
fi
echo ""
-echo "✅ Deploy concluído!"
+echo "Deploy completed!"
echo ""
-echo "🔧 Comandos úteis:"
-echo " Ver logs: oc logs -f deployment/${IMAGE_NAME} -n ${NAMESPACE}"
+echo "Useful commands:"
+echo " View logs: oc logs -f deployment/${IMAGE_NAME} -n ${NAMESPACE}"
echo " Port-forward: oc port-forward service/${IMAGE_NAME}-service 8080:8080 -n ${NAMESPACE}"
echo " Status: oc get pods -n ${NAMESPACE} -l app.kubernetes.io/name=${IMAGE_NAME}"
diff --git a/deploy-zero-downtime.sh b/deploy-zero-downtime.sh
index 82baa8a..1784fda 100755
--- a/deploy-zero-downtime.sh
+++ b/deploy-zero-downtime.sh
@@ -1,145 +1,145 @@
#!/bin/bash
-# Script de deploy com ZERO DOWNTIME (Blue-Green Strategy)
-# Garante que a aplicação nunca saia do ar durante atualizações
+# Zero downtime deployment script (Blue-Green Strategy)
+# Ensures application never goes down during updates
set -e
-# Configurações
+# Configuration
IMAGE_NAME="resource-governance"
REGISTRY="andersonid"
NAMESPACE="resource-governance"
TAG=${1:-"latest"}
FULL_IMAGE="$REGISTRY/$IMAGE_NAME:$TAG"
-echo "🚀 Deploy ZERO DOWNTIME para OpenShift"
-echo "======================================"
-echo "Imagem: $FULL_IMAGE"
+echo "Zero Downtime Deploy to OpenShift"
+echo "================================="
+echo "Image: $FULL_IMAGE"
echo "Namespace: $NAMESPACE"
-echo "Estratégia: Blue-Green (Zero Downtime)"
+echo "Strategy: Blue-Green (Zero Downtime)"
echo ""
-# Verificar se está logado no OpenShift
+# Check if logged into OpenShift
if ! oc whoami > /dev/null 2>&1; then
- echo "❌ Não está logado no OpenShift. Execute: oc login"
+ echo "ERROR: Not logged into OpenShift. Run: oc login"
exit 1
fi
-echo "✅ Logado no OpenShift como: $(oc whoami)"
+echo "SUCCESS: Logged into OpenShift as: $(oc whoami)"
echo ""
-# Função para verificar se todos os pods estão prontos
+# Function to check if all pods are ready
check_pods_ready() {
local deployment=$1
local namespace=$2
local timeout=${3:-300}
- echo "⏳ Aguardando pods do deployment $deployment ficarem prontos..."
+ echo "Waiting for deployment $deployment pods to be ready..."
oc rollout status deployment/$deployment -n $namespace --timeout=${timeout}s
}
-# Função para verificar se a aplicação está respondendo
+# Function to check if application is responding
check_app_health() {
local service=$1
local namespace=$2
local port=${3:-8080}
- echo "🔍 Verificando saúde da aplicação..."
+ echo "Checking application health..."
- # Tentar port-forward temporário para testar
+ # Try temporary port-forward for testing
local temp_pid
oc port-forward service/$service $port:$port -n $namespace > /dev/null 2>&1 &
temp_pid=$!
- # Aguardar port-forward inicializar
+ # Wait for port-forward to initialize
sleep 3
- # Testar health check
+ # Test health check
local health_status
health_status=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:$port/api/v1/health 2>/dev/null || echo "000")
- # Parar port-forward temporário
+ # Stop temporary port-forward
kill $temp_pid 2>/dev/null || true
if [ "$health_status" = "200" ]; then
- echo "✅ Aplicação saudável (HTTP $health_status)"
+ echo "SUCCESS: Application healthy (HTTP $health_status)"
return 0
else
- echo "❌ Aplicação não saudável (HTTP $health_status)"
+ echo "ERROR: Application not healthy (HTTP $health_status)"
return 1
fi
}
-# Aplicar manifests básicos
-echo "📋 Aplicando manifests básicos..."
+# Apply basic manifests
+echo "Applying basic manifests..."
oc apply -f k8s/namespace.yaml
oc apply -f k8s/rbac.yaml
oc apply -f k8s/configmap.yaml
-# Verificar se o deployment existe
+# Check if deployment exists
if oc get deployment $IMAGE_NAME -n $NAMESPACE > /dev/null 2>&1; then
- echo "🔄 Deployment existente encontrado. Iniciando atualização zero-downtime..."
+ echo "Existing deployment found. Starting zero-downtime update..."
- # Obter número atual de réplicas
+ # Get current replica count
CURRENT_REPLICAS=$(oc get deployment $IMAGE_NAME -n $NAMESPACE -o jsonpath='{.spec.replicas}')
- echo "📊 Réplicas atuais: $CURRENT_REPLICAS"
+ echo "Current replicas: $CURRENT_REPLICAS"
- # Atualizar imagem do deployment
- echo "🔄 Atualizando imagem para: $FULL_IMAGE"
+ # Update deployment image
+ echo "Updating image to: $FULL_IMAGE"
oc set image deployment/$IMAGE_NAME $IMAGE_NAME=$FULL_IMAGE -n $NAMESPACE
- # Aguardar rollout com timeout maior
- echo "⏳ Aguardando rollout (pode levar alguns minutos)..."
+ # Wait for rollout with longer timeout
+ echo "Waiting for rollout (may take a few minutes)..."
if check_pods_ready $IMAGE_NAME $NAMESPACE 600; then
- echo "✅ Rollout concluído com sucesso!"
+ echo "SUCCESS: Rollout completed successfully!"
- # Verificar saúde da aplicação
+ # Check application health
if check_app_health "${IMAGE_NAME}-service" $NAMESPACE; then
- echo "🎉 Deploy zero-downtime concluído com sucesso!"
+ echo "Zero downtime deploy completed successfully!"
else
- echo "⚠️ Deploy concluído, mas aplicação pode não estar saudável"
- echo "🔍 Verifique os logs: oc logs -f deployment/$IMAGE_NAME -n $NAMESPACE"
+ echo "WARNING: Deploy completed, but application may not be healthy"
+ echo "Check logs: oc logs -f deployment/$IMAGE_NAME -n $NAMESPACE"
fi
else
- echo "❌ Rollout falhou ou timeout"
- echo "🔍 Verificando status dos pods:"
+ echo "ERROR: Rollout failed or timeout"
+ echo "Checking pod status:"
oc get pods -n $NAMESPACE -l app.kubernetes.io/name=$IMAGE_NAME
exit 1
fi
else
- echo "🆕 Deployment não existe. Criando novo deployment..."
+ echo "Deployment does not exist. Creating new deployment..."
oc apply -f k8s/deployment.yaml
oc apply -f k8s/service.yaml
oc apply -f k8s/route.yaml
- # Aguardar pods ficarem prontos
+ # Wait for pods to be ready
if check_pods_ready $IMAGE_NAME $NAMESPACE 300; then
- echo "✅ Novo deployment criado com sucesso!"
+ echo "SUCCESS: New deployment created successfully!"
else
- echo "❌ Falha ao criar deployment"
+ echo "ERROR: Failed to create deployment"
exit 1
fi
fi
-# Verificar status final
+# Check final status
echo ""
-echo "📊 STATUS FINAL:"
-echo "================"
+echo "FINAL STATUS:"
+echo "============="
oc get deployment $IMAGE_NAME -n $NAMESPACE
echo ""
oc get pods -n $NAMESPACE -l app.kubernetes.io/name=$IMAGE_NAME
echo ""
-# Obter URL da rota
+# Get route URL
ROUTE_URL=$(oc get route $IMAGE_NAME-route -n $NAMESPACE -o jsonpath='{.spec.host}' 2>/dev/null || echo "")
if [ -n "$ROUTE_URL" ]; then
- echo "🌐 URLs de acesso:"
+ echo "Access URLs:"
echo " OpenShift: https://$ROUTE_URL"
- echo " Port-forward: http://localhost:8080 (se ativo)"
+ echo " Port-forward: http://localhost:8080 (if active)"
echo ""
- echo "💡 Para iniciar port-forward: oc port-forward service/${IMAGE_NAME}-service 8080:8080 -n $NAMESPACE"
+ echo "To start port-forward: oc port-forward service/${IMAGE_NAME}-service 8080:8080 -n $NAMESPACE"
fi
echo ""
-echo "✅ Deploy zero-downtime concluído!"
-echo "🔄 Estratégia: Rolling Update com maxUnavailable=0 (zero downtime)"
+echo "Zero downtime deploy completed!"
+echo "Strategy: Rolling Update with maxUnavailable=0 (zero downtime)"
diff --git a/openshift-deploy.sh b/openshift-deploy.sh
index 4973fb9..575ff6e 100755
--- a/openshift-deploy.sh
+++ b/openshift-deploy.sh
@@ -1,95 +1,95 @@
#!/bin/bash
-# Script de deploy para OpenShift usando GitHub
+# Deploy script for OpenShift using GitHub
set -e
-# Cores para output
+# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
-# Configurações
+# Configuration
REPO_URL="https://github.com/andersonid/openshift-resource-governance.git"
IMAGE_NAME="resource-governance"
REGISTRY="andersonid"
TAG="${1:-latest}"
NAMESPACE="resource-governance"
-echo -e "${BLUE}🚀 Deploying OpenShift Resource Governance Tool from GitHub${NC}"
+echo -e "${BLUE}Deploying OpenShift Resource Governance Tool from GitHub${NC}"
echo -e "${BLUE}Repository: ${REPO_URL}${NC}"
echo -e "${BLUE}Image: ${REGISTRY}/${IMAGE_NAME}:${TAG}${NC}"
-# Verificar se oc está instalado
+# Check if oc is installed
if ! command -v oc &> /dev/null; then
- echo -e "${RED}❌ OpenShift CLI (oc) não está instalado.${NC}"
- echo -e "${YELLOW}Instale o oc CLI: https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html${NC}"
+ echo -e "${RED}ERROR: OpenShift CLI (oc) is not installed.${NC}"
+ echo -e "${YELLOW}Install oc CLI: https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html${NC}"
exit 1
fi
-# Verificar se está logado no OpenShift
+# Check if logged into OpenShift
if ! oc whoami &> /dev/null; then
- echo -e "${RED}❌ Não está logado no OpenShift.${NC}"
- echo -e "${YELLOW}Faça login com: oc login ${NC}"
+ echo -e "${RED}ERROR: Not logged into OpenShift.${NC}"
+    echo -e "${YELLOW}Log in with: oc login ${NC}"
exit 1
fi
-echo -e "${GREEN}✅ Logado como: $(oc whoami)${NC}"
+echo -e "${GREEN}SUCCESS: Logged in as: $(oc whoami)${NC}"
-# Criar namespace se não existir
-echo -e "${YELLOW}📁 Creating namespace...${NC}"
+# Create namespace if it doesn't exist
+echo -e "${YELLOW}Creating namespace...${NC}"
oc apply -f k8s/namespace.yaml
-# Aplicar RBAC
-echo -e "${YELLOW}🔐 Applying RBAC...${NC}"
+# Apply RBAC
+echo -e "${YELLOW}Applying RBAC...${NC}"
oc apply -f k8s/rbac.yaml
-# Aplicar ConfigMap
-echo -e "${YELLOW}⚙️ Applying ConfigMap...${NC}"
+# Apply ConfigMap
+echo -e "${YELLOW}Applying ConfigMap...${NC}"
oc apply -f k8s/configmap.yaml
-# Atualizar imagem no DaemonSet
-echo -e "${YELLOW}🔄 Updating image in DaemonSet...${NC}"
+# Update image in DaemonSet
+echo -e "${YELLOW}Updating image in DaemonSet...${NC}"
oc set image daemonset/${IMAGE_NAME} ${IMAGE_NAME}="${REGISTRY}/${IMAGE_NAME}:${TAG}" -n "${NAMESPACE}" || true
-# Aplicar DaemonSet
-echo -e "${YELLOW}📦 Applying DaemonSet...${NC}"
+# Apply DaemonSet
+echo -e "${YELLOW}Applying DaemonSet...${NC}"
oc apply -f k8s/daemonset.yaml
-# Aplicar Service
-echo -e "${YELLOW}🌐 Applying Service...${NC}"
+# Apply Service
+echo -e "${YELLOW}Applying Service...${NC}"
oc apply -f k8s/service.yaml
-# Aplicar Route
-echo -e "${YELLOW}🛣️ Applying Route...${NC}"
+# Apply Route
+echo -e "${YELLOW}Applying Route...${NC}"
oc apply -f k8s/route.yaml
-# Aguardar pods ficarem prontos
-echo -e "${YELLOW}⏳ Waiting for pods to be ready...${NC}"
+# Wait for pods to be ready
+echo -e "${YELLOW}Waiting for pods to be ready...${NC}"
oc wait --for=condition=ready pod -l app.kubernetes.io/name=${IMAGE_NAME} -n "${NAMESPACE}" --timeout=300s
-# Obter URL da rota
+# Get route URL
ROUTE_URL=$(oc get route ${IMAGE_NAME}-route -n "${NAMESPACE}" -o jsonpath='{.spec.host}')
if [ -n "${ROUTE_URL}" ]; then
- echo -e "${GREEN}🎉 Deploy completed successfully!${NC}"
- echo -e "${BLUE}🌐 Application URL: https://${ROUTE_URL}${NC}"
- echo -e "${BLUE}📊 GitHub Repository: ${REPO_URL}${NC}"
+ echo -e "${GREEN}SUCCESS: Deploy completed successfully!${NC}"
+ echo -e "${BLUE}Application URL: https://${ROUTE_URL}${NC}"
+ echo -e "${BLUE}GitHub Repository: ${REPO_URL}${NC}"
else
- echo -e "${YELLOW}⚠️ Deploy completed, but route URL not found.${NC}"
+ echo -e "${YELLOW}WARNING: Deploy completed, but route URL not found.${NC}"
echo -e "${BLUE}Check with: oc get routes -n ${NAMESPACE}${NC}"
fi
-# Mostrar status
-echo -e "${BLUE}📊 Deployment status:${NC}"
+# Show status
+echo -e "${BLUE}Deployment status:${NC}"
oc get all -n "${NAMESPACE}"
-echo -e "${BLUE}🔍 To check logs:${NC}"
+echo -e "${BLUE}To check logs:${NC}"
echo -e " oc logs -f daemonset/${IMAGE_NAME} -n ${NAMESPACE}"
-echo -e "${BLUE}🧪 To test health:${NC}"
+echo -e "${BLUE}To test health:${NC}"
echo -e " curl https://${ROUTE_URL}/health"
-echo -e "${BLUE}📝 To update from GitHub:${NC}"
+echo -e "${BLUE}To update from GitHub:${NC}"
echo -e " git pull origin main"
echo -e " ./openshift-deploy.sh "
diff --git a/scripts/auto-deploy.sh b/scripts/auto-deploy.sh
index 230f862..5369a41 100755
--- a/scripts/auto-deploy.sh
+++ b/scripts/auto-deploy.sh
@@ -1,117 +1,117 @@
#!/bin/bash
-# Script para deploy automático após GitHub Actions
-# Este script pode ser executado localmente ou via webhook
+# Auto-deploy script after GitHub Actions
+# This script can be executed locally or via webhook
set -e
-# Cores para output
+# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
-# Configurações
+# Configuration
IMAGE_NAME="resource-governance"
REGISTRY="andersonid"
NAMESPACE="resource-governance"
IMAGE_TAG=${1:-latest}
-echo -e "${BLUE}🚀 Auto-Deploy para OpenShift${NC}"
+echo -e "${BLUE}Auto-Deploy to OpenShift${NC}"
echo "================================"
-echo "Imagem: ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"
+echo "Image: ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"
echo "Namespace: ${NAMESPACE}"
echo ""
-# 1. Verificar login no OpenShift
+# 1. Check OpenShift login
if ! oc whoami > /dev/null 2>&1; then
- echo -e "${RED}❌ Não logado no OpenShift. Por favor, faça login com 'oc login'.${NC}"
+    echo -e "${RED}ERROR: Not logged into OpenShift. Please log in with 'oc login'.${NC}"
exit 1
fi
-echo -e "${GREEN}✅ Logado no OpenShift como: $(oc whoami)${NC}"
+echo -e "${GREEN}SUCCESS: Logged into OpenShift as: $(oc whoami)${NC}"
echo ""
-# 2. Verificar se a imagem existe no Docker Hub
-echo -e "${BLUE}🔍 Verificando imagem no Docker Hub...${NC}"
+# 2. Check if image exists on Docker Hub
+echo -e "${BLUE}Checking image on Docker Hub...${NC}"
if ! skopeo inspect docker://${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG} > /dev/null 2>&1; then
- echo -e "${RED}❌ Imagem ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG} não encontrada no Docker Hub!${NC}"
+ echo -e "${RED}ERROR: Image ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG} not found on Docker Hub!${NC}"
exit 1
fi
-echo -e "${GREEN}✅ Imagem encontrada no Docker Hub${NC}"
+echo -e "${GREEN}SUCCESS: Image found on Docker Hub${NC}"
echo ""
-# 3. Verificar se o namespace existe
+# 3. Check if namespace exists
if ! oc get namespace ${NAMESPACE} > /dev/null 2>&1; then
- echo -e "${BLUE}📋 Criando namespace ${NAMESPACE}...${NC}"
+ echo -e "${BLUE}Creating namespace ${NAMESPACE}...${NC}"
oc create namespace ${NAMESPACE}
else
- echo -e "${GREEN}✅ Namespace ${NAMESPACE} já existe${NC}"
+ echo -e "${GREEN}SUCCESS: Namespace ${NAMESPACE} already exists${NC}"
fi
echo ""
-# 4. Aplicar manifests básicos
-echo -e "${BLUE}📋 Aplicando manifests básicos...${NC}"
+# 4. Apply basic manifests
+echo -e "${BLUE}Applying basic manifests...${NC}"
oc apply -f k8s/rbac.yaml -n ${NAMESPACE}
oc apply -f k8s/configmap.yaml -n ${NAMESPACE}
echo ""
-# 5. Verificar se o deployment existe
+# 5. Check if deployment exists
if oc get deployment ${IMAGE_NAME} -n ${NAMESPACE} > /dev/null 2>&1; then
- echo -e "${BLUE}🔄 Deployment existente encontrado. Iniciando atualização...${NC}"
+ echo -e "${BLUE}Existing deployment found. Starting update...${NC}"
- # Obter imagem atual
+ # Get current image
CURRENT_IMAGE=$(oc get deployment ${IMAGE_NAME} -n ${NAMESPACE} -o jsonpath='{.spec.template.spec.containers[0].image}')
- echo "Imagem atual: ${CURRENT_IMAGE}"
- echo "Nova imagem: ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"
+ echo "Current image: ${CURRENT_IMAGE}"
+ echo "New image: ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"
- # Verificar se a imagem mudou
+ # Check if image changed
if [ "${CURRENT_IMAGE}" = "${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}" ]; then
- echo -e "${YELLOW}⚠️ Imagem já está atualizada. Nenhuma ação necessária.${NC}"
+ echo -e "${YELLOW}WARNING: Image already up to date. No action needed.${NC}"
exit 0
fi
- # Atualizar deployment com nova imagem
- echo -e "${BLUE}🔄 Atualizando imagem do deployment...${NC}"
+ # Update deployment with new image
+ echo -e "${BLUE}Updating deployment image...${NC}"
oc set image deployment/${IMAGE_NAME} ${IMAGE_NAME}=${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG} -n ${NAMESPACE}
- # Aguardar rollout
- echo -e "${BLUE}⏳ Aguardando rollout (pode levar alguns minutos)...${NC}"
+ # Wait for rollout
+ echo -e "${BLUE}Waiting for rollout (may take a few minutes)...${NC}"
oc rollout status deployment/${IMAGE_NAME} -n ${NAMESPACE} --timeout=300s
- echo -e "${GREEN}✅ Rollout concluído com sucesso!${NC}"
+ echo -e "${GREEN}SUCCESS: Rollout completed successfully!${NC}"
else
- echo -e "${BLUE}📦 Deployment não encontrado. Criando novo deployment...${NC}"
- # Aplicar deployment, service e route
+ echo -e "${BLUE}Deployment not found. Creating new deployment...${NC}"
+ # Apply deployment, service and route
oc apply -f k8s/deployment.yaml -n ${NAMESPACE}
oc apply -f k8s/service.yaml -n ${NAMESPACE}
oc apply -f k8s/route.yaml -n ${NAMESPACE}
- # Aguardar rollout inicial
- echo -e "${BLUE}⏳ Aguardando rollout inicial...${NC}"
+ # Wait for initial rollout
+ echo -e "${BLUE}Waiting for initial rollout...${NC}"
oc rollout status deployment/${IMAGE_NAME} -n ${NAMESPACE} --timeout=300s
- echo -e "${GREEN}✅ Rollout inicial concluído com sucesso!${NC}"
+ echo -e "${GREEN}SUCCESS: Initial rollout completed successfully!${NC}"
fi
echo ""
-# 6. Verificar status final
-echo -e "${BLUE}📊 STATUS FINAL:${NC}"
+# 6. Check final status
+echo -e "${BLUE}FINAL STATUS:${NC}"
echo "================"
oc get deployment ${IMAGE_NAME} -n ${NAMESPACE}
echo ""
oc get pods -n ${NAMESPACE} -l app.kubernetes.io/name=${IMAGE_NAME}
echo ""
-# 7. Obter URLs de acesso
+# 7. Get access URLs
ROUTE_URL=$(oc get route ${IMAGE_NAME}-route -n ${NAMESPACE} -o jsonpath='{.spec.host}' 2>/dev/null || echo "")
-echo -e "${BLUE}🌐 URLs de acesso:${NC}"
+echo -e "${BLUE}Access URLs:${NC}"
if [ -n "$ROUTE_URL" ]; then
echo " OpenShift: https://$ROUTE_URL"
else
- echo " OpenShift: Rota não encontrada ou não disponível."
+ echo " OpenShift: Route not found or not available."
fi
-echo " Port-forward: http://localhost:8080 (se ativo)"
+echo " Port-forward: http://localhost:8080 (if active)"
echo ""
-echo -e "${GREEN}✅ Auto-deploy concluído com sucesso!${NC}"
-echo -e "${BLUE}🔄 Estratégia: Rolling Update com maxUnavailable=0 (zero downtime)${NC}"
+echo -e "${GREEN}SUCCESS: Auto-deploy completed successfully!${NC}"
+echo -e "${BLUE}Strategy: Rolling Update with maxUnavailable=0 (zero downtime)${NC}"
diff --git a/scripts/build.sh b/scripts/build.sh
index 6b182a7..3534424 100755
--- a/scripts/build.sh
+++ b/scripts/build.sh
@@ -1,57 +1,57 @@
#!/bin/bash
-# Script de build para OpenShift Resource Governance Tool
+# Build script for OpenShift Resource Governance Tool
set -e
-# Cores para output
+# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
-# Configurações
+# Configuration
IMAGE_NAME="resource-governance"
TAG="${1:-latest}"
REGISTRY="${2:-andersonid}"
FULL_IMAGE_NAME="${REGISTRY}/${IMAGE_NAME}:${TAG}"
-echo -e "${BLUE}🚀 Building OpenShift Resource Governance Tool${NC}"
+echo -e "${BLUE}Building OpenShift Resource Governance Tool${NC}"
echo -e "${BLUE}Image: ${FULL_IMAGE_NAME}${NC}"
-# Verificar se Podman está instalado
+# Check if Podman is installed
if ! command -v podman &> /dev/null; then
- echo -e "${RED}❌ Podman não está instalado. Instale o Podman e tente novamente.${NC}"
+ echo -e "${RED}ERROR: Podman is not installed. Install Podman and try again.${NC}"
exit 1
fi
-# Build da imagem
-echo -e "${YELLOW}📦 Building container image with Podman...${NC}"
+# Build image
+echo -e "${YELLOW}Building container image with Podman...${NC}"
podman build -t "${FULL_IMAGE_NAME}" .
if [ $? -eq 0 ]; then
- echo -e "${GREEN}✅ Image built successfully!${NC}"
+ echo -e "${GREEN}SUCCESS: Image built successfully!${NC}"
else
- echo -e "${RED}❌ Build failed!${NC}"
+ echo -e "${RED}ERROR: Build failed!${NC}"
exit 1
fi
-# Testar a imagem
-echo -e "${YELLOW}🧪 Testing image...${NC}"
-podman run --rm "${FULL_IMAGE_NAME}" python -c "import app.main; print('✅ App imports successfully')"
+# Test image
+echo -e "${YELLOW}Testing image...${NC}"
+podman run --rm "${FULL_IMAGE_NAME}" python -c "import app.main; print('SUCCESS: App imports successfully')"
if [ $? -eq 0 ]; then
- echo -e "${GREEN}✅ Image test passed!${NC}"
+ echo -e "${GREEN}SUCCESS: Image test passed!${NC}"
else
- echo -e "${RED}❌ Image test failed!${NC}"
+ echo -e "${RED}ERROR: Image test failed!${NC}"
exit 1
fi
-# Mostrar informações da imagem
-echo -e "${BLUE}📊 Image information:${NC}"
+# Show image information
+echo -e "${BLUE}Image information:${NC}"
podman images "${FULL_IMAGE_NAME}"
-echo -e "${GREEN}🎉 Build completed successfully!${NC}"
+echo -e "${GREEN}SUCCESS: Build completed successfully!${NC}"
echo -e "${BLUE}To push to registry:${NC}"
echo -e " podman push ${FULL_IMAGE_NAME}"
echo -e "${BLUE}To run locally:${NC}"
diff --git a/scripts/deploy.sh b/scripts/deploy.sh
index 517da4d..f16bf45 100755
--- a/scripts/deploy.sh
+++ b/scripts/deploy.sh
@@ -1,90 +1,90 @@
#!/bin/bash
-# Script de deploy para OpenShift Resource Governance Tool
+# Deploy script for OpenShift Resource Governance Tool
set -e
-# Cores para output
+# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
-# Configurações
+# Configuration
NAMESPACE="resource-governance"
IMAGE_NAME="resource-governance"
TAG="${1:-latest}"
REGISTRY="${2:-andersonid}"
FULL_IMAGE_NAME="${REGISTRY}/${IMAGE_NAME}:${TAG}"
-echo -e "${BLUE}🚀 Deploying OpenShift Resource Governance Tool${NC}"
+echo -e "${BLUE}Deploying OpenShift Resource Governance Tool${NC}"
echo -e "${BLUE}Namespace: ${NAMESPACE}${NC}"
echo -e "${BLUE}Image: ${FULL_IMAGE_NAME}${NC}"
-# Verificar se oc está instalado
+# Check if oc is installed
if ! command -v oc &> /dev/null; then
- echo -e "${RED}❌ OpenShift CLI (oc) não está instalado.${NC}"
- echo -e "${YELLOW}Instale o oc CLI: https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html${NC}"
+ echo -e "${RED}ERROR: OpenShift CLI (oc) is not installed.${NC}"
+ echo -e "${YELLOW}Install oc CLI: https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html${NC}"
exit 1
fi
-# Verificar se está logado no OpenShift
+# Check if logged into OpenShift
if ! oc whoami &> /dev/null; then
- echo -e "${RED}❌ Não está logado no OpenShift.${NC}"
- echo -e "${YELLOW}Faça login com: oc login ${NC}"
+ echo -e "${RED}ERROR: Not logged into OpenShift.${NC}"
+    echo -e "${YELLOW}Log in with: oc login ${NC}"
exit 1
fi
-echo -e "${GREEN}✅ Logado como: $(oc whoami)${NC}"
+echo -e "${GREEN}SUCCESS: Logged in as: $(oc whoami)${NC}"
-# Criar namespace se não existir
-echo -e "${YELLOW}📁 Creating namespace...${NC}"
+# Create namespace if it doesn't exist
+echo -e "${YELLOW}Creating namespace...${NC}"
oc apply -f k8s/namespace.yaml
-# Aplicar RBAC
-echo -e "${YELLOW}🔐 Applying RBAC...${NC}"
+# Apply RBAC
+echo -e "${YELLOW}Applying RBAC...${NC}"
oc apply -f k8s/rbac.yaml
-# Aplicar ConfigMap
-echo -e "${YELLOW}⚙️ Applying ConfigMap...${NC}"
+# Apply ConfigMap
+echo -e "${YELLOW}Applying ConfigMap...${NC}"
oc apply -f k8s/configmap.yaml
-# Atualizar imagem no DaemonSet
-echo -e "${YELLOW}🔄 Updating image in DaemonSet...${NC}"
+# Update image in DaemonSet
+echo -e "${YELLOW}Updating image in DaemonSet...${NC}"
oc set image daemonset/resource-governance resource-governance="${FULL_IMAGE_NAME}" -n "${NAMESPACE}"
-# Aplicar DaemonSet
-echo -e "${YELLOW}📦 Applying DaemonSet...${NC}"
+# Apply DaemonSet
+echo -e "${YELLOW}Applying DaemonSet...${NC}"
oc apply -f k8s/daemonset.yaml
-# Aplicar Service
-echo -e "${YELLOW}🌐 Applying Service...${NC}"
+# Apply Service
+echo -e "${YELLOW}Applying Service...${NC}"
oc apply -f k8s/service.yaml
-# Aplicar Route
-echo -e "${YELLOW}🛣️ Applying Route...${NC}"
+# Apply Route
+echo -e "${YELLOW}Applying Route...${NC}"
oc apply -f k8s/route.yaml
-# Aguardar pods ficarem prontos
-echo -e "${YELLOW}⏳ Waiting for pods to be ready...${NC}"
+# Wait for pods to be ready
+echo -e "${YELLOW}Waiting for pods to be ready...${NC}"
oc wait --for=condition=ready pod -l app.kubernetes.io/name=resource-governance -n "${NAMESPACE}" --timeout=300s
-# Obter URL da rota
+# Get route URL
ROUTE_URL=$(oc get route resource-governance-route -n "${NAMESPACE}" -o jsonpath='{.spec.host}')
if [ -n "${ROUTE_URL}" ]; then
- echo -e "${GREEN}🎉 Deploy completed successfully!${NC}"
- echo -e "${BLUE}🌐 Application URL: https://${ROUTE_URL}${NC}"
+ echo -e "${GREEN}SUCCESS: Deploy completed successfully!${NC}"
+ echo -e "${BLUE}Application URL: https://${ROUTE_URL}${NC}"
else
- echo -e "${YELLOW}⚠️ Deploy completed, but route URL not found.${NC}"
+ echo -e "${YELLOW}WARNING: Deploy completed, but route URL not found.${NC}"
echo -e "${BLUE}Check with: oc get routes -n ${NAMESPACE}${NC}"
fi
-# Mostrar status
-echo -e "${BLUE}📊 Deployment status:${NC}"
+# Show status
+echo -e "${BLUE}Deployment status:${NC}"
oc get all -n "${NAMESPACE}"
-echo -e "${BLUE}🔍 To check logs:${NC}"
+echo -e "${BLUE}To check logs:${NC}"
echo -e " oc logs -f daemonset/resource-governance -n ${NAMESPACE}"
-echo -e "${BLUE}🧪 To test health:${NC}"
+echo -e "${BLUE}To test health:${NC}"
echo -e " curl https://${ROUTE_URL}/health"
diff --git a/scripts/release.sh b/scripts/release.sh
index 7b87a0d..013ab98 100755
--- a/scripts/release.sh
+++ b/scripts/release.sh
@@ -1,46 +1,46 @@
#!/bin/bash
-# Script para criar releases e tags do OpenShift Resource Governance
+# Script to create releases and tags for OpenShift Resource Governance
set -e
-# Cores para output
+# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
-# Função para mostrar ajuda
+# Function to show help
show_help() {
- echo "🚀 OpenShift Resource Governance - Release Script"
- echo "=================================================="
+ echo "OpenShift Resource Governance - Release Script"
+ echo "=============================================="
echo ""
- echo "Uso: $0 [COMANDO] [VERSÃO]"
+ echo "Usage: $0 [COMMAND] [VERSION]"
echo ""
- echo "Comandos:"
- echo " patch Criar release patch (ex: 1.0.0 -> 1.0.1)"
- echo " minor Criar release minor (ex: 1.0.0 -> 1.1.0)"
- echo " major Criar release major (ex: 1.0.0 -> 2.0.0)"
- echo " custom Criar release com versão customizada"
- echo " list Listar releases existentes"
- echo " help Mostrar esta ajuda"
+ echo "Commands:"
+ echo " patch Create patch release (ex: 1.0.0 -> 1.0.1)"
+ echo " minor Create minor release (ex: 1.0.0 -> 1.1.0)"
+ echo " major Create major release (ex: 1.0.0 -> 2.0.0)"
+ echo " custom Create release with custom version"
+ echo " list List existing releases"
+ echo " help Show this help"
echo ""
- echo "Exemplos:"
+ echo "Examples:"
echo " $0 patch # 1.0.0 -> 1.0.1"
echo " $0 minor # 1.0.0 -> 1.1.0"
- echo " $0 custom 2.0.0-beta.1 # Versão customizada"
- echo " $0 list # Listar releases"
+ echo " $0 custom 2.0.0-beta.1 # Custom version"
+ echo " $0 list # List releases"
echo ""
}
-# Função para obter a versão atual
+# Function to get current version
get_current_version() {
local latest_tag=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
- echo "${latest_tag#v}" # Remove o 'v' do início
+ echo "${latest_tag#v}" # Remove 'v' prefix
}
-# Função para incrementar versão
+# Function to increment version
increment_version() {
local version=$1
local type=$2
@@ -66,78 +66,78 @@ increment_version() {
esac
}
-# Função para validar versão
+# Function to validate version
validate_version() {
local version=$1
if [[ ! $version =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?$ ]]; then
- echo -e "${RED}❌ Versão inválida: $version${NC}"
- echo "Formato esperado: X.Y.Z ou X.Y.Z-suffix"
+ echo -e "${RED}ERROR: Invalid version: $version${NC}"
+ echo "Expected format: X.Y.Z or X.Y.Z-suffix"
exit 1
fi
}
-# Função para criar release
+# Function to create release
create_release() {
local version=$1
local tag="v$version"
- echo -e "${BLUE}🚀 Criando release $tag${NC}"
+ echo -e "${BLUE}Creating release $tag${NC}"
echo ""
- # Verificar se já existe
+ # Check if already exists
if git tag -l | grep -q "^$tag$"; then
- echo -e "${RED}❌ Tag $tag já existe!${NC}"
+ echo -e "${RED}ERROR: Tag $tag already exists!${NC}"
exit 1
fi
- # Verificar se há mudanças não commitadas
+ # Check for uncommitted changes
if ! git diff-index --quiet HEAD --; then
- echo -e "${YELLOW}⚠️ Há mudanças não commitadas. Deseja continuar? (y/N)${NC}"
+ echo -e "${YELLOW}WARNING: There are uncommitted changes. Continue? (y/N)${NC}"
read -r response
if [[ ! "$response" =~ ^[Yy]$ ]]; then
- echo "Cancelado."
+ echo "Cancelled."
exit 1
fi
fi
- # Fazer commit das mudanças se houver
+ # Commit changes if any
if ! git diff-index --quiet HEAD --; then
- echo -e "${BLUE}📝 Fazendo commit das mudanças...${NC}"
+ echo -e "${BLUE}Committing changes...${NC}"
git add .
git commit -m "Release $tag"
fi
- # Criar tag
- echo -e "${BLUE}🏷️ Criando tag $tag...${NC}"
+ # Create tag
+ echo -e "${BLUE}Creating tag $tag...${NC}"
git tag -a "$tag" -m "Release $tag"
- # Push da tag
- echo -e "${BLUE}📤 Fazendo push da tag...${NC}"
+ # Push tag
+ echo -e "${BLUE}Pushing tag...${NC}"
git push origin "$tag"
echo ""
- echo -e "${GREEN}✅ Release $tag criado com sucesso!${NC}"
+ echo -e "${GREEN}SUCCESS: Release $tag created successfully!${NC}"
echo ""
- echo "🔗 Links úteis:"
+ echo "Useful links:"
echo " GitHub: https://github.com/andersonid/openshift-resource-governance/releases/tag/$tag"
echo " Docker Hub: https://hub.docker.com/r/andersonid/resource-governance/tags"
echo ""
- echo "🚀 O GitHub Actions irá automaticamente:"
- echo " 1. Buildar a imagem Docker"
- echo " 2. Fazer push para Docker Hub"
- echo " 3. Criar release no GitHub"
+ echo "GitHub Actions will automatically:"
+ echo " 1. Build Docker image"
+ echo " 2. Push to Docker Hub"
+ echo " 3. Create GitHub release"
echo ""
- echo "⏳ Aguarde alguns minutos e verifique:"
+ echo "Wait a few minutes and check:"
echo " gh run list --repo andersonid/openshift-resource-governance --workflow='build-only.yml'"
}
-# Função para listar releases
+# Function to list releases
list_releases() {
- echo -e "${BLUE}📋 Releases existentes:${NC}"
+ echo -e "${BLUE}Existing releases:${NC}"
echo ""
git tag -l --sort=-version:refname | head -10
echo ""
- echo "💡 Para ver todos: git tag -l --sort=-version:refname"
+ echo "To see all: git tag -l --sort=-version:refname"
}
# Main
@@ -162,8 +162,8 @@ case "${1:-help}" in
;;
"custom")
if [ -z "$2" ]; then
- echo -e "${RED}❌ Versão customizada não fornecida!${NC}"
- echo "Uso: $0 custom 2.0.0-beta.1"
+ echo -e "${RED}ERROR: Custom version not provided!${NC}"
+ echo "Usage: $0 custom 2.0.0-beta.1"
exit 1
fi
validate_version "$2"
diff --git a/scripts/webhook-deploy.py b/scripts/webhook-deploy.py
index 17dfec8..6716e16 100755
--- a/scripts/webhook-deploy.py
+++ b/scripts/webhook-deploy.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
"""
-Webhook para deploy automático após GitHub Actions
-Este script pode ser executado como um serviço para detectar mudanças no Docker Hub
+Webhook for automatic deployment after GitHub Actions
+This script can be run as a service to detect changes on Docker Hub
"""
import os
@@ -11,13 +11,13 @@ import logging
from flask import Flask, request, jsonify
from datetime import datetime
-# Configuração do logging
+# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = Flask(__name__)
-# Configurações
+# Configuration
IMAGE_NAME = os.getenv('IMAGE_NAME', 'resource-governance')
REGISTRY = os.getenv('REGISTRY', 'andersonid')
NAMESPACE = os.getenv('NAMESPACE', 'resource-governance')
@@ -25,100 +25,100 @@ SCRIPT_PATH = os.getenv('AUTO_DEPLOY_SCRIPT', './scripts/auto-deploy.sh')
@app.route('/webhook/dockerhub', methods=['POST'])
def dockerhub_webhook():
- """Webhook para receber notificações do Docker Hub"""
+ """Webhook to receive Docker Hub notifications"""
try:
data = request.get_json()
- # Verificar se é uma notificação de push
+ # Check if it's a push notification
if data.get('push_data', {}).get('tag') == 'latest':
- logger.info(f"Recebida notificação de push para {REGISTRY}/{IMAGE_NAME}:latest")
+ logger.info(f"Received push notification for {REGISTRY}/{IMAGE_NAME}:latest")
- # Executar deploy automático
+ # Execute automatic deployment
result = run_auto_deploy('latest')
return jsonify({
'status': 'success',
- 'message': 'Deploy automático iniciado',
+ 'message': 'Automatic deployment started',
'result': result
}), 200
else:
- logger.info(f"Push ignorado - tag: {data.get('push_data', {}).get('tag')}")
- return jsonify({'status': 'ignored', 'message': 'Tag não é latest'}), 200
+ logger.info(f"Push ignored - tag: {data.get('push_data', {}).get('tag')}")
+ return jsonify({'status': 'ignored', 'message': 'Tag is not latest'}), 200
except Exception as e:
- logger.error(f"Erro no webhook: {e}")
+ logger.error(f"Webhook error: {e}")
return jsonify({'status': 'error', 'message': str(e)}), 500
@app.route('/webhook/github', methods=['POST'])
def github_webhook():
- """Webhook para receber notificações do GitHub"""
+ """Webhook to receive GitHub notifications"""
try:
- # Verificar se é um push para main
+ # Check if it's a push to main
if request.headers.get('X-GitHub-Event') == 'push':
data = request.get_json()
if data.get('ref') == 'refs/heads/main':
- logger.info("Recebida notificação de push para main branch")
+ logger.info("Received push notification for main branch")
- # Executar deploy automático
+ # Execute automatic deployment
result = run_auto_deploy('latest')
return jsonify({
'status': 'success',
- 'message': 'Deploy automático iniciado',
+ 'message': 'Automatic deployment started',
'result': result
}), 200
else:
- logger.info(f"Push ignorado - branch: {data.get('ref')}")
- return jsonify({'status': 'ignored', 'message': 'Branch não é main'}), 200
+ logger.info(f"Push ignored - branch: {data.get('ref')}")
+ return jsonify({'status': 'ignored', 'message': 'Branch is not main'}), 200
else:
- logger.info(f"Evento ignorado: {request.headers.get('X-GitHub-Event')}")
- return jsonify({'status': 'ignored', 'message': 'Evento não é push'}), 200
+ logger.info(f"Event ignored: {request.headers.get('X-GitHub-Event')}")
+ return jsonify({'status': 'ignored', 'message': 'Event is not push'}), 200
except Exception as e:
- logger.error(f"Erro no webhook: {e}")
+ logger.error(f"Webhook error: {e}")
return jsonify({'status': 'error', 'message': str(e)}), 500
@app.route('/deploy/', methods=['POST'])
def manual_deploy(tag):
- """Deploy manual com tag específica"""
+ """Manual deployment with specific tag"""
try:
- logger.info(f"Deploy manual solicitado para tag: {tag}")
+ logger.info(f"Manual deployment requested for tag: {tag}")
result = run_auto_deploy(tag)
return jsonify({
'status': 'success',
- 'message': f'Deploy manual iniciado para tag: {tag}',
+ 'message': f'Manual deployment started for tag: {tag}',
'result': result
}), 200
except Exception as e:
- logger.error(f"Erro no deploy manual: {e}")
+ logger.error(f"Manual deployment error: {e}")
return jsonify({'status': 'error', 'message': str(e)}), 500
def run_auto_deploy(tag):
- """Executar script de deploy automático"""
+ """Execute automatic deployment script"""
try:
- logger.info(f"Executando deploy automático para tag: {tag}")
+ logger.info(f"Executing automatic deployment for tag: {tag}")
- # Executar script de deploy
+ # Execute deployment script
result = subprocess.run(
[SCRIPT_PATH, tag],
capture_output=True,
text=True,
- timeout=600 # 10 minutos timeout
+ timeout=600 # 10 minutes timeout
)
if result.returncode == 0:
- logger.info("Deploy automático concluído com sucesso")
+ logger.info("Automatic deployment completed successfully")
return {
'success': True,
'stdout': result.stdout,
'stderr': result.stderr
}
else:
- logger.error(f"Deploy automático falhou: {result.stderr}")
+ logger.error(f"Automatic deployment failed: {result.stderr}")
return {
'success': False,
'stdout': result.stdout,
@@ -126,13 +126,13 @@ def run_auto_deploy(tag):
}
except subprocess.TimeoutExpired:
- logger.error("Deploy automático timeout")
+ logger.error("Automatic deployment timeout")
return {
'success': False,
'error': 'Timeout'
}
except Exception as e:
- logger.error(f"Erro ao executar deploy automático: {e}")
+ logger.error(f"Error executing automatic deployment: {e}")
return {
'success': False,
'error': str(e)
@@ -150,9 +150,9 @@ def health():
@app.route('/status', methods=['GET'])
def status():
- """Status do serviço"""
+ """Service status"""
try:
- # Verificar se está logado no OpenShift
+ # Check if logged into OpenShift
result = subprocess.run(['oc', 'whoami'], capture_output=True, text=True)
return jsonify({
@@ -174,7 +174,7 @@ if __name__ == '__main__':
port = int(os.getenv('PORT', 8080))
debug = os.getenv('DEBUG', 'false').lower() == 'true'
- logger.info(f"Iniciando webhook server na porta {port}")
- logger.info(f"Configurações: IMAGE_NAME={IMAGE_NAME}, REGISTRY={REGISTRY}, NAMESPACE={NAMESPACE}")
+ logger.info(f"Starting webhook server on port {port}")
+ logger.info(f"Configuration: IMAGE_NAME={IMAGE_NAME}, REGISTRY={REGISTRY}, NAMESPACE={NAMESPACE}")
app.run(host='0.0.0.0', port=port, debug=debug)
diff --git a/setup.sh b/setup.sh
index 4195534..19c55a0 100755
--- a/setup.sh
+++ b/setup.sh
@@ -1,67 +1,67 @@
#!/bin/bash
-# Script de setup para OpenShift Resource Governance Tool
+# Setup script for OpenShift Resource Governance Tool
set -e
-# Cores para output
+# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
-echo -e "${BLUE}🚀 Setting up OpenShift Resource Governance Tool${NC}"
+echo -e "${BLUE}Setting up OpenShift Resource Governance Tool${NC}"
-# Verificar se Python está instalado
+# Check if Python is installed
if ! command -v python3 &> /dev/null; then
- echo -e "${RED}❌ Python 3 não está instalado.${NC}"
- echo -e "${YELLOW}Instale Python 3.11+ e tente novamente.${NC}"
+ echo -e "${RED}ERROR: Python 3 is not installed.${NC}"
+ echo -e "${YELLOW}Install Python 3.11+ and try again.${NC}"
exit 1
fi
-# Verificar se pip está instalado
+# Check if pip is installed
if ! command -v pip3 &> /dev/null; then
- echo -e "${RED}❌ pip3 não está instalado.${NC}"
- echo -e "${YELLOW}Instale pip3 e tente novamente.${NC}"
+ echo -e "${RED}ERROR: pip3 is not installed.${NC}"
+ echo -e "${YELLOW}Install pip3 and try again.${NC}"
exit 1
fi
-# Instalar dependências Python
-echo -e "${YELLOW}📦 Installing Python dependencies...${NC}"
+# Install Python dependencies
+echo -e "${YELLOW}Installing Python dependencies...${NC}"
pip3 install -r requirements.txt
-# Tornar scripts executáveis
-echo -e "${YELLOW}🔧 Making scripts executable...${NC}"
+# Make scripts executable
+echo -e "${YELLOW}Making scripts executable...${NC}"
chmod +x scripts/*.sh
-# Criar diretório de relatórios
-echo -e "${YELLOW}📁 Creating reports directory...${NC}"
+# Create reports directory
+echo -e "${YELLOW}Creating reports directory...${NC}"
mkdir -p reports
-# Verificar se Docker está instalado
+# Check if Docker is installed
if command -v docker &> /dev/null; then
- echo -e "${GREEN}✅ Docker encontrado${NC}"
+ echo -e "${GREEN}SUCCESS: Docker found${NC}"
else
- echo -e "${YELLOW}⚠️ Docker não encontrado. Instale para fazer build da imagem.${NC}"
+ echo -e "${YELLOW}WARNING: Docker not found. Install to build image.${NC}"
fi
-# Verificar se oc está instalado
+# Check if oc is installed
if command -v oc &> /dev/null; then
- echo -e "${GREEN}✅ OpenShift CLI (oc) encontrado${NC}"
+ echo -e "${GREEN}SUCCESS: OpenShift CLI (oc) found${NC}"
else
- echo -e "${YELLOW}⚠️ OpenShift CLI (oc) não encontrado. Instale para fazer deploy.${NC}"
+ echo -e "${YELLOW}WARNING: OpenShift CLI (oc) not found. Install to deploy.${NC}"
fi
-echo -e "${GREEN}🎉 Setup completed successfully!${NC}"
+echo -e "${GREEN}SUCCESS: Setup completed successfully!${NC}"
echo ""
-echo -e "${BLUE}Próximos passos:${NC}"
-echo -e "1. ${YELLOW}Desenvolvimento local:${NC} make dev"
-echo -e "2. ${YELLOW}Build da imagem:${NC} make build"
-echo -e "3. ${YELLOW}Deploy no OpenShift:${NC} make deploy"
-echo -e "4. ${YELLOW}Ver documentação:${NC} cat README.md"
+echo -e "${BLUE}Next steps:${NC}"
+echo -e "1. ${YELLOW}Local development:${NC} make dev"
+echo -e "2. ${YELLOW}Build image:${NC} make build"
+echo -e "3. ${YELLOW}Deploy to OpenShift:${NC} make deploy"
+echo -e "4. ${YELLOW}View documentation:${NC} cat README.md"
echo ""
-echo -e "${BLUE}Comandos úteis:${NC}"
-echo -e " make help - Mostrar todos os comandos"
-echo -e " make test - Executar testes"
-echo -e " make logs - Ver logs da aplicação"
-echo -e " make status - Ver status da aplicação"
+echo -e "${BLUE}Useful commands:${NC}"
+echo -e " make help - Show all commands"
+echo -e " make test - Run tests"
+echo -e " make logs - View application logs"
+echo -e " make status - View application status"