Translate all Portuguese text to English
@@ -17,19 +17,19 @@ from app.services.historical_analysis import HistoricalAnalysisService

logger = logging.getLogger(__name__)

-# Criar router
+# Create router
api_router = APIRouter()

-# Inicializar serviços
+# Initialize services
validation_service = ValidationService()
report_service = ReportService()

def get_k8s_client(request: Request):
-"""Dependency para obter cliente Kubernetes"""
+"""Dependency to get Kubernetes client"""
return request.app.state.k8s_client

def get_prometheus_client(request: Request):
-"""Dependency para obter cliente Prometheus"""
+"""Dependency to get Prometheus client"""
return request.app.state.prometheus_client

@api_router.get("/cluster/status")
@@ -39,17 +39,17 @@ async def get_cluster_status(
):
"""Get overall cluster status"""
try:
-# Coletar dados básicos
+# Collect basic data
pods = await k8s_client.get_all_pods()
nodes_info = await k8s_client.get_nodes_info()

-# Validar recursos
+# Validate resources
all_validations = []
for pod in pods:
pod_validations = validation_service.validate_pod_resources(pod)
all_validations.extend(pod_validations)

-# Obter informações de overcommit
+# Get overcommit information
overcommit_info = await prometheus_client.get_cluster_overcommit()

# Get VPA recommendations
@@ -78,19 +78,19 @@ async def get_namespace_status(
):
"""Get status of a specific namespace"""
try:
-# Coletar dados do namespace
+# Collect namespace data
namespace_resources = await k8s_client.get_namespace_resources(namespace)

-# Validar recursos
+# Validate resources
all_validations = []
for pod in namespace_resources.pods:
pod_validations = validation_service.validate_pod_resources(pod)
all_validations.extend(pod_validations)

-# Obter uso de recursos do Prometheus
+# Get resource usage from Prometheus
resource_usage = await prometheus_client.get_namespace_resource_usage(namespace)

-# Generate report do namespace
+# Generate namespace report
report = report_service.generate_namespace_report(
namespace=namespace,
pods=namespace_resources.pods,
@@ -131,26 +131,26 @@ async def get_validations(
):
"""List resource validations with pagination"""
try:
-# Coletar pods
+# Collect pods
if namespace:
namespace_resources = await k8s_client.get_namespace_resources(namespace)
pods = namespace_resources.pods
else:
pods = await k8s_client.get_all_pods()

-# Validar recursos
+# Validate resources
all_validations = []
for pod in pods:
pod_validations = validation_service.validate_pod_resources(pod)
all_validations.extend(pod_validations)

-# Filtrar por severidade se especificado
+# Filter by severity if specified
if severity:
all_validations = [
v for v in all_validations if v.severity == severity
]

-# Paginação
+# Pagination
total = len(all_validations)
start = (page - 1) * page_size
end = start + page_size
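For context on the pagination hunk above: total, start, and end presumably feed a list slice on the lines that follow (not shown in this diff). A minimal standalone sketch of that arithmetic, using a hypothetical paginate helper that is not part of this codebase:

    def paginate(items, page, page_size):
        # Same arithmetic as the route above; the slice itself is assumed.
        total = len(items)
        start = (page - 1) * page_size
        end = start + page_size
        return {"total": total, "page": page, "page_size": page_size, "items": items[start:end]}

    validations = [f"validation-{i}" for i in range(23)]
    print(paginate(validations, page=2, page_size=10)["items"])  # validation-10 .. validation-19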
@@ -180,10 +180,10 @@ async def get_validations_by_namespace(
):
"""List validations grouped by namespace with pagination"""
try:
-# Coletar todos os pods com filtro de namespaces do sistema
+# Collect all pods with system namespace filter
pods = await k8s_client.get_all_pods(include_system_namespaces=include_system_namespaces)

-# Validar recursos e agrupar por namespace
+# Validate resources and group by namespace
namespace_validations = {}
for pod in pods:
pod_validations = validation_service.validate_pod_resources(pod)
@@ -203,14 +203,14 @@ async def get_validations_by_namespace(
"validations": []
}

-# Filtrar por severidade se especificado
+# Filter by severity if specified
if severity:
pod_validations = [v for v in pod_validations if v.severity == severity]

namespace_validations[pod.namespace]["pods"][pod.name]["validations"] = pod_validations
namespace_validations[pod.namespace]["total_validations"] += len(pod_validations)

-# Contar severidades
+# Count severities
for validation in pod_validations:
namespace_validations[pod.namespace]["severity_breakdown"][validation.severity] += 1

@@ -218,7 +218,7 @@ async def get_validations_by_namespace(
namespace_list = list(namespace_validations.values())
namespace_list.sort(key=lambda x: x["total_validations"], reverse=True)

-# Paginação
+# Pagination
total = len(namespace_list)
start = (page - 1) * page_size
end = start + page_size
@@ -270,17 +270,17 @@ async def export_report(
pods = await k8s_client.get_all_pods()
nodes_info = await k8s_client.get_nodes_info()

-# Filtrar por namespaces se especificado
+# Filter by namespaces if specified
if export_request.namespaces:
pods = [p for p in pods if p.namespace in export_request.namespaces]

-# Validar recursos
+# Validate resources
all_validations = []
for pod in pods:
pod_validations = validation_service.validate_pod_resources(pod)
all_validations.extend(pod_validations)

-# Obter informações adicionais
+# Get additional information
overcommit_info = {}
vpa_recommendations = []

@@ -299,7 +299,7 @@ async def export_report(
nodes_info=nodes_info
)

-# Exportar
+# Export
filepath = await report_service.export_report(report, export_request)

return {
@@ -331,7 +331,7 @@ async def download_exported_file(filename: str):
file_info = next((f for f in files if f["filename"] == filename), None)

if not file_info:
-raise HTTPException(status_code=404, detail="Arquivo não encontrado")
+raise HTTPException(status_code=404, detail="File not found")

return FileResponse(
path=file_info["filepath"],
@@ -350,18 +350,18 @@ async def apply_recommendation(
):
"""Apply resource recommendation"""
try:
-# TODO: Implementar aplicação de recomendações
+# TODO: Implement recommendation application
-# Por enquanto, apenas simular
+# For now, just simulate
if recommendation.dry_run:
return {
-"message": "Dry run - recomendação seria aplicada",
+"message": "Dry run - recommendation would be applied",
"pod": recommendation.pod_name,
"namespace": recommendation.namespace,
"container": recommendation.container_name,
"action": f"{recommendation.action} {recommendation.resource_type} = {recommendation.value}"
}
else:
-# Implementar aplicação real da recomendação
+# Implement real recommendation application
raise HTTPException(status_code=501, detail="Recommendation application not implemented yet")

except Exception as e:
@@ -378,14 +378,14 @@ async def get_historical_validations(
try:
validation_service = ValidationService()

-# Coletar pods
+# Collect pods
if namespace:
namespace_resources = await k8s_client.get_namespace_resources(namespace)
pods = namespace_resources.pods
else:
pods = await k8s_client.get_all_pods()

-# Validar com análise histórica
+# Validate with historical analysis
all_validations = []
for pod in pods:
pod_validations = await validation_service.validate_pod_resources_with_historical_analysis(
@@ -18,12 +18,12 @@ class Settings(BaseSettings):
prometheus_url: str = "http://prometheus.openshift-monitoring.svc.cluster.local:9090"

# Validation settings
-cpu_limit_ratio: float = 3.0  # Ratio padrão limit:request para CPU
+cpu_limit_ratio: float = 3.0  # Default limit:request ratio for CPU
-memory_limit_ratio: float = 3.0  # Ratio padrão limit:request para memória
+memory_limit_ratio: float = 3.0  # Default limit:request ratio for memory
-min_cpu_request: str = "10m"  # Mínimo de CPU request
+min_cpu_request: str = "10m"  # Minimum CPU request
-min_memory_request: str = "32Mi"  # Mínimo de memória request
+min_memory_request: str = "32Mi"  # Minimum memory request

-# Namespaces críticos para VPA
+# Critical namespaces for VPA
critical_namespaces: List[str] = [
"openshift-monitoring",
"openshift-ingress",
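A note on the settings above: cpu_limit_ratio / memory_limit_ratio and the minimum request values are the knobs the validation service presumably checks against. A hedged sketch of the ratio check, using a hypothetical check_ratio helper rather than the project's actual code:

    def check_ratio(request_cores: float, limit_cores: float, max_ratio: float = 3.0) -> bool:
        # True when the limit stays within max_ratio times the request.
        if request_cores <= 0:
            return False  # a missing or zero request cannot be ratio-checked
        return (limit_cores / request_cores) <= max_ratio

    print(check_ratio(0.1, 0.25, 3.0))  # True: 2.5x is within the 3.0 default
    print(check_ratio(0.1, 0.5, 3.0))   # False: 5x exceeds the 3.0 default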
@@ -1,5 +1,5 @@
"""
-Cliente Kubernetes/OpenShift para coleta de dados
+Kubernetes/OpenShift client for data collection
"""
import logging
from typing import List, Dict, Any, Optional
@@ -14,7 +14,7 @@ from app.models.resource_models import PodResource, NamespaceResources, VPARecom
logger = logging.getLogger(__name__)

class K8sClient:
-"""Cliente para interação com Kubernetes/OpenShift"""
+"""Client for interaction with Kubernetes/OpenShift"""

def __init__(self):
self.v1 = None
@@ -23,16 +23,16 @@ class K8sClient:
self.initialized = False

async def initialize(self):
-"""Inicializar cliente Kubernetes"""
+"""Initialize Kubernetes client"""
try:
-# Tentar carregar configuração do cluster
+# Try to load cluster configuration
if settings.kubeconfig_path:
config.load_kube_config(config_file=settings.kubeconfig_path)
else:
-# Usar configuração in-cluster
+# Use in-cluster configuration
config.load_incluster_config()

-# Inicializar clientes da API
+# Initialize API clients
self.v1 = client.CoreV1Api()
self.autoscaling_v1 = client.AutoscalingV1Api()
self.apps_v1 = client.AppsV1Api()
@@ -45,8 +45,8 @@ class K8sClient:
raise

def _is_system_namespace(self, namespace: str, include_system: bool = None) -> bool:
-"""Verificar se um namespace é do sistema"""
+"""Check if a namespace is a system namespace"""
-# Usar parâmetro se fornecido, senão usar configuração global
+# Use parameter if provided, otherwise use global configuration
should_include = include_system if include_system is not None else settings.include_system_namespaces

if should_include:
@@ -58,18 +58,18 @@ class K8sClient:
return False

async def get_all_pods(self, include_system_namespaces: bool = None) -> List[PodResource]:
-"""Coletar informações de todos os pods do cluster"""
+"""Collect information from all pods in the cluster"""
if not self.initialized:
raise RuntimeError("Kubernetes client not initialized")

pods_data = []

try:
-# Listar todos os pods em todos os namespaces
+# List all pods in all namespaces
pods = self.v1.list_pod_for_all_namespaces(watch=False)

for pod in pods.items:
-# Filtrar namespaces do sistema
+# Filter system namespaces
if self._is_system_namespace(pod.metadata.namespace, include_system_namespaces):
continue
pod_resource = PodResource(
@@ -80,7 +80,7 @@ class K8sClient:
containers=[]
)

-# Processar containers do pod
+# Process pod containers
for container in pod.spec.containers:
container_resource = {
"name": container.name,
@@ -91,7 +91,7 @@ class K8sClient:
}
}

-# Extrair requests e limits
+# Extract requests and limits
if container.resources:
if container.resources.requests:
container_resource["resources"]["requests"] = {
@@ -106,7 +106,7 @@ class K8sClient:

pods_data.append(pod_resource)

-logger.info(f"Coletados {len(pods_data)} pods")
+logger.info(f"Collected {len(pods_data)} pods")
return pods_data

except ApiException as e:
@@ -114,13 +114,13 @@ class K8sClient:
raise

async def get_namespace_resources(self, namespace: str) -> NamespaceResources:
-"""Coletar recursos de um namespace específico"""
+"""Collect resources from a specific namespace"""
if not self.initialized:
raise RuntimeError("Kubernetes client not initialized")

-# Verificar se é namespace do sistema
+# Check if it's a system namespace
if self._is_system_namespace(namespace):
-logger.info(f"Namespace {namespace} é do sistema, retornando vazio")
+logger.info(f"Namespace {namespace} is system, returning empty")
return NamespaceResources(
name=namespace,
pods=[],
@@ -131,7 +131,7 @@ class K8sClient:
)

try:
-# Listar pods do namespace
+# List namespace pods
pods = self.v1.list_namespaced_pod(namespace=namespace)

namespace_resource = NamespaceResources(
@@ -183,28 +183,28 @@ class K8sClient:
raise

async def get_vpa_recommendations(self) -> List[VPARecommendation]:
-"""Coletar recomendações do VPA"""
+"""Collect VPA recommendations"""
if not self.initialized:
raise RuntimeError("Kubernetes client not initialized")

recommendations = []

try:
-# VPA não está disponível na API padrão do Kubernetes
+# VPA is not available in the standard Kubernetes API
-# TODO: Implementar usando Custom Resource Definition (CRD)
+# TODO: Implement using Custom Resource Definition (CRD)
-logger.warning("VPA não está disponível na API padrão do Kubernetes")
+logger.warning("VPA is not available in the standard Kubernetes API")
return []

-logger.info(f"Coletadas {len(recommendations)} recomendações VPA")
+logger.info(f"Collected {len(recommendations)} VPA recommendations")
return recommendations

except ApiException as e:
logger.error(f"Error collecting VPA recommendations: {e}")
-# VPA pode não estar instalado, retornar lista vazia
+# VPA may not be installed, return empty list
return []

async def get_nodes_info(self) -> List[Dict[str, Any]]:
-"""Coletar informações dos nós do cluster"""
+"""Collect cluster node information"""
if not self.initialized:
raise RuntimeError("Kubernetes client not initialized")

@@ -221,19 +221,19 @@ class K8sClient:
"conditions": []
}

-# Capacidade do nó
+# Node capacity
if node.status.capacity:
node_info["capacity"] = {
k: v for k, v in node.status.capacity.items()
}

-# Recursos alocáveis
+# Allocatable resources
if node.status.allocatable:
node_info["allocatable"] = {
k: v for k, v in node.status.allocatable.items()
}

-# Condições do nó
+# Node conditions
if node.status.conditions:
node_info["conditions"] = [
{
@@ -1,5 +1,5 @@
"""
-Cliente Prometheus para coleta de métricas
+Prometheus client for metrics collection
"""
import logging
import aiohttp
@@ -12,7 +12,7 @@ from app.core.config import settings
logger = logging.getLogger(__name__)

class PrometheusClient:
-"""Cliente para interação com Prometheus"""
+"""Client for Prometheus interaction"""

def __init__(self):
self.base_url = settings.prometheus_url
@@ -20,25 +20,25 @@ class PrometheusClient:
self.initialized = False

async def initialize(self):
-"""Inicializar cliente Prometheus"""
+"""Initialize Prometheus client"""
try:
self.session = aiohttp.ClientSession()

-# Testar conexão
+# Test connection
async with self.session.get(f"{self.base_url}/api/v1/query?query=up") as response:
if response.status == 200:
self.initialized = True
logger.info("Prometheus client initialized successfully")
else:
-logger.warning(f"Prometheus retornou status {response.status}")
+logger.warning(f"Prometheus returned status {response.status}")

except Exception as e:
logger.error(f"Error initializing Prometheus client: {e}")
-# Prometheus pode não estar disponível, continuar sem ele
+# Prometheus may not be available, continue without it
self.initialized = False

async def query(self, query: str, time: Optional[datetime] = None) -> Dict[str, Any]:
-"""Executar query no Prometheus"""
+"""Execute query in Prometheus"""
if not self.initialized or not self.session:
return {"status": "error", "message": "Prometheus not available"}

@@ -63,17 +63,17 @@ class PrometheusClient:
return {"status": "error", "message": str(e)}

async def get_pod_cpu_usage(self, namespace: str, pod_name: str) -> Dict[str, Any]:
-"""Obter uso de CPU de um pod específico"""
+"""Get CPU usage for a specific pod"""
query = f'rate(container_cpu_usage_seconds_total{{namespace="{namespace}", pod="{pod_name}"}}[5m])'
return await self.query(query)

async def get_pod_memory_usage(self, namespace: str, pod_name: str) -> Dict[str, Any]:
-"""Obter uso de memória de um pod específico"""
+"""Get memory usage for a specific pod"""
query = f'container_memory_working_set_bytes{{namespace="{namespace}", pod="{pod_name}"}}'
return await self.query(query)

async def get_namespace_resource_usage(self, namespace: str) -> Dict[str, Any]:
-"""Obter uso de recursos de um namespace"""
+"""Get resource usage of a namespace"""
cpu_query = f'sum(rate(container_cpu_usage_seconds_total{{namespace="{namespace}"}}[5m]))'
memory_query = f'sum(container_memory_working_set_bytes{{namespace="{namespace}"}})'

@@ -86,7 +86,7 @@ class PrometheusClient:
}

async def get_cluster_overcommit(self) -> Dict[str, Any]:
-"""Verificar overcommit no cluster"""
+"""Check overcommit in cluster"""
# CPU overcommit
cpu_capacity_query = 'sum(kube_node_status_capacity{resource="cpu"})'
cpu_requests_query = 'sum(kube_pod_container_resource_requests{resource="cpu"})'
@@ -112,7 +112,7 @@ class PrometheusClient:
}

async def get_node_resource_usage(self) -> List[Dict[str, Any]]:
-"""Obter uso de recursos por nó"""
+"""Get resource usage by node"""
query = '''
(
kube_node_status_capacity{resource="cpu"} or
@@ -126,6 +126,6 @@ class PrometheusClient:
return result

async def close(self):
-"""Fechar sessão HTTP"""
+"""Close HTTP session"""
if self.session:
await self.session.close()
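For readers unfamiliar with the endpoint the client above calls: Prometheus exposes instant queries at /api/v1/query and wraps results in a status/data envelope. A minimal aiohttp sketch of that call; the URL is a placeholder and the helper is illustrative, not part of the project:

    import asyncio
    import aiohttp

    async def instant_query(base_url: str, promql: str):
        # GET /api/v1/query?query=<promql>; response is {"status": ..., "data": {"result": [...]}}
        async with aiohttp.ClientSession() as session:
            async with session.get(f"{base_url}/api/v1/query", params={"query": promql}) as resp:
                payload = await resp.json()
                return payload.get("data", {}).get("result", [])

    if __name__ == "__main__":
        print(asyncio.run(instant_query("http://localhost:9090", "up")))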
app/main.py
@@ -1,6 +1,6 @@
"""
OpenShift Resource Governance Tool
-Aplicação para governança de recursos no cluster OpenShift
+Application for resource governance in OpenShift cluster
"""
import os
import logging
@@ -14,7 +14,7 @@ from app.api.routes import api_router
from app.core.kubernetes_client import K8sClient
from app.core.prometheus_client import PrometheusClient

-# Configuração de logging
+# Logging configuration
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
@@ -23,10 +23,10 @@ logger = logging.getLogger(__name__)

@asynccontextmanager
async def lifespan(app: FastAPI):
-"""Inicialização e cleanup da aplicação"""
+"""Application initialization and cleanup"""
-logger.info("Iniciando OpenShift Resource Governance Tool")
+logger.info("Starting OpenShift Resource Governance Tool")

-# Inicializar clientes
+# Initialize clients
app.state.k8s_client = K8sClient()
app.state.prometheus_client = PrometheusClient()

@@ -40,25 +40,25 @@ async def lifespan(app: FastAPI):

yield

-logger.info("Finalizando aplicação")
+logger.info("Shutting down application")

-# Criar aplicação FastAPI
+# Create FastAPI application
app = FastAPI(
title="OpenShift Resource Governance Tool",
-description="Ferramenta de governança de recursos para clusters OpenShift",
+description="Resource governance tool for OpenShift clusters",
version="1.0.0",
lifespan=lifespan
)

-# Incluir rotas da API
+# Include API routes
app.include_router(api_router, prefix="/api/v1")

-# Servir arquivos estáticos
+# Serve static files
app.mount("/static", StaticFiles(directory="app/static"), name="static")

@app.get("/", response_class=HTMLResponse)
async def root():
-"""Página principal da aplicação"""
+"""Main application page"""
with open("app/static/index.html", "r") as f:
return HTMLResponse(content=f.read())
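The wiring above is what makes the route dependencies in app/api/routes.py work: the lifespan stores the clients on app.state, and get_k8s_client / get_prometheus_client read them back through request.app.state. A compact, self-contained sketch of the same pattern with a dummy client (illustrative only, not the project's classes):

    from contextlib import asynccontextmanager
    from fastapi import Depends, FastAPI, Request

    class DummyClient:
        async def ping(self) -> str:
            return "pong"

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        # Attach a shared client once at startup, as main.py does with K8sClient.
        app.state.k8s_client = DummyClient()
        yield

    app = FastAPI(lifespan=lifespan)

    def get_k8s_client(request: Request):
        return request.app.state.k8s_client

    @app.get("/ping")
    async def ping(client: DummyClient = Depends(get_k8s_client)):
        return {"result": await client.ping()}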
@@ -1,17 +1,17 @@
"""
-Modelos de dados para recursos Kubernetes
+Data models for Kubernetes resources
"""
from typing import List, Dict, Any, Optional
from pydantic import BaseModel

class ContainerResource(BaseModel):
-"""Recursos de um container"""
+"""Container resources"""
name: str
image: str
resources: Dict[str, Dict[str, str]]

class PodResource(BaseModel):
-"""Recursos de um pod"""
+"""Pod resources"""
name: str
namespace: str
node_name: Optional[str] = None
@@ -19,7 +19,7 @@ class PodResource(BaseModel):
containers: List[ContainerResource]

class NamespaceResources(BaseModel):
-"""Recursos de um namespace"""
+"""Namespace resources"""
name: str
pods: List[PodResource]
total_cpu_requests: str = "0"
@@ -28,14 +28,14 @@ class NamespaceResources(BaseModel):
total_memory_limits: str = "0"

class VPARecommendation(BaseModel):
-"""Recomendação do VPA"""
+"""VPA recommendation"""
name: str
namespace: str
target_ref: Dict[str, str]
recommendations: Dict[str, Any]

class ResourceValidation(BaseModel):
-"""Resultado de validação de recursos"""
+"""Resource validation result"""
pod_name: str
namespace: str
container_name: str
@@ -72,7 +72,7 @@ class ExportRequest(BaseModel):
include_validations: bool = True

class ApplyRecommendationRequest(BaseModel):
-"""Request para aplicar recomendação"""
+"""Request to apply recommendation"""
pod_name: str
namespace: str
container_name: str
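To illustrate the shape of the models above, a small construction example with made-up values; the class bodies here are trimmed to the fields visible in this diff, and the real classes carry additional fields:

    from typing import Dict, List, Optional
    from pydantic import BaseModel

    class ContainerResource(BaseModel):
        name: str
        image: str
        resources: Dict[str, Dict[str, str]]

    class PodResource(BaseModel):
        name: str
        namespace: str
        node_name: Optional[str] = None
        containers: List[ContainerResource]

    pod = PodResource(
        name="example-pod",
        namespace="example-ns",
        containers=[ContainerResource(
            name="app",
            image="registry.example/app:latest",
            resources={"requests": {"cpu": "100m", "memory": "128Mi"},
                       "limits": {"cpu": "300m", "memory": "256Mi"}},
        )],
    )
    print(pod.containers[0].resources["requests"])  # {'cpu': '100m', 'memory': '128Mi'}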
@@ -1,5 +1,5 @@
"""
-Serviço de análise histórica usando métricas do Prometheus
+Historical analysis service using Prometheus metrics
"""
import logging
import asyncio
@@ -14,16 +14,16 @@ from app.core.config import settings
logger = logging.getLogger(__name__)

class HistoricalAnalysisService:
-"""Serviço para análise histórica de recursos usando Prometheus"""
+"""Service for historical resource analysis using Prometheus"""

def __init__(self):
self.prometheus_url = settings.prometheus_url
self.time_ranges = {
-'1h': 3600,  # 1 hora
+'1h': 3600,  # 1 hour
-'6h': 21600,  # 6 horas
+'6h': 21600,  # 6 hours
-'24h': 86400,  # 24 horas
+'24h': 86400,  # 24 hours
-'7d': 604800,  # 7 dias
+'7d': 604800,  # 7 days
-'30d': 2592000  # 30 dias
+'30d': 2592000  # 30 days
}

async def analyze_pod_historical_usage(
@@ -31,7 +31,7 @@ class HistoricalAnalysisService:
pod: PodResource,
time_range: str = '24h'
) -> List[ResourceValidation]:
-"""Analisar uso histórico de um pod"""
+"""Analyze historical usage of a pod"""
validations = []

if time_range not in self.time_ranges:
@@ -41,13 +41,13 @@ class HistoricalAnalysisService:
start_time = end_time - timedelta(seconds=self.time_ranges[time_range])

try:
-# Analisar CPU
+# Analyze CPU
cpu_analysis = await self._analyze_cpu_usage(
pod, start_time, end_time, time_range
)
validations.extend(cpu_analysis)

-# Analisar memória
+# Analyze memory
memory_analysis = await self._analyze_memory_usage(
pod, start_time, end_time, time_range
)
@@ -74,14 +74,14 @@ class HistoricalAnalysisService:
end_time: datetime,
time_range: str
) -> List[ResourceValidation]:
-"""Analisar uso histórico de CPU"""
+"""Analyze historical CPU usage"""
validations = []

for container in pod.containers:
container_name = container["name"]

try:
-# Query para CPU usage rate
+# Query for CPU usage rate
cpu_query = f'''
rate(container_cpu_usage_seconds_total{{
pod="{pod.name}",
@@ -92,7 +92,7 @@ class HistoricalAnalysisService:
}}[{time_range}])
'''

-# Query para CPU requests
+# Query for CPU requests
cpu_requests_query = f'''
kube_pod_container_resource_requests{{
pod="{pod.name}",
@@ -101,7 +101,7 @@ class HistoricalAnalysisService:
}}
'''

-# Query para CPU limits
+# Query for CPU limits
cpu_limits_query = f'''
kube_pod_container_resource_limits{{
pod="{pod.name}",
@@ -110,7 +110,7 @@ class HistoricalAnalysisService:
}}
'''

-# Executar queries
+# Execute queries
cpu_usage = await self._query_prometheus(cpu_query, start_time, end_time)
cpu_requests = await self._query_prometheus(cpu_requests_query, start_time, end_time)
cpu_limits = await self._query_prometheus(cpu_limits_query, start_time, end_time)
@@ -134,14 +134,14 @@ class HistoricalAnalysisService:
end_time: datetime,
time_range: str
) -> List[ResourceValidation]:
-"""Analisar uso histórico de memória"""
+"""Analyze historical memory usage"""
validations = []

for container in pod.containers:
container_name = container["name"]

try:
-# Query para memória usage
+# Query for memory usage
memory_query = f'''
container_memory_working_set_bytes{{
pod="{pod.name}",
@@ -152,7 +152,7 @@ class HistoricalAnalysisService:
}}
'''

-# Query para memória requests
+# Query for memory requests
memory_requests_query = f'''
kube_pod_container_resource_requests{{
pod="{pod.name}",
@@ -161,7 +161,7 @@ class HistoricalAnalysisService:
}}
'''

-# Query para memória limits
+# Query for memory limits
memory_limits_query = f'''
kube_pod_container_resource_limits{{
pod="{pod.name}",
@@ -170,7 +170,7 @@ class HistoricalAnalysisService:
}}
'''

-# Executar queries
+# Execute queries
memory_usage = await self._query_prometheus(memory_query, start_time, end_time)
memory_requests = await self._query_prometheus(memory_requests_query, start_time, end_time)
memory_limits = await self._query_prometheus(memory_limits_query, start_time, end_time)
@@ -197,22 +197,22 @@ class HistoricalAnalysisService:
limits_data: List[Dict],
time_range: str
) -> List[ResourceValidation]:
-"""Analisar métricas de CPU"""
+"""Analyze CPU metrics"""
validations = []

if not usage_data or not requests_data:
return validations

-# Calcular estatísticas de uso
+# Calculate usage statistics
usage_values = [float(point[1]) for point in usage_data if point[1] != 'NaN']
if not usage_values:
return validations

-# Valores atuais de requests/limits
+# Current values of requests/limits
current_requests = float(requests_data[0][1]) if requests_data else 0
current_limits = float(limits_data[0][1]) if limits_data else 0

-# Estatísticas de uso
+# Usage statistics
avg_usage = sum(usage_values) / len(usage_values)
max_usage = max(usage_values)
p95_usage = sorted(usage_values)[int(len(usage_values) * 0.95)]
@@ -282,28 +282,28 @@ class HistoricalAnalysisService:
limits_data: List[Dict],
time_range: str
) -> List[ResourceValidation]:
-"""Analisar métricas de memória"""
+"""Analyze memory metrics"""
validations = []

if not usage_data or not requests_data:
return validations

-# Calcular estatísticas de uso
+# Calculate usage statistics
usage_values = [float(point[1]) for point in usage_data if point[1] != 'NaN']
if not usage_values:
return validations

-# Valores atuais de requests/limits (em bytes)
+# Current values of requests/limits (in bytes)
current_requests = float(requests_data[0][1]) if requests_data else 0
current_limits = float(limits_data[0][1]) if limits_data else 0

-# Estatísticas de uso
+# Usage statistics
avg_usage = sum(usage_values) / len(usage_values)
max_usage = max(usage_values)
p95_usage = sorted(usage_values)[int(len(usage_values) * 0.95)]
p99_usage = sorted(usage_values)[int(len(usage_values) * 0.99)]

-# Converter para MiB para melhor legibilidade
+# Convert to MiB for better readability
def bytes_to_mib(bytes_value):
return bytes_value / (1024 * 1024)

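A standalone illustration of the index-based percentile used in the two analysis methods above (sort the sampled values, then index at 95% of the length); the helper and the sample series are illustrative:

    def percentile(values, q):
        ordered = sorted(values)
        return ordered[int(len(ordered) * q)]

    samples = [0.05, 0.07, 0.06, 0.30, 0.08, 0.09, 0.05, 0.06, 0.07, 0.10]
    print(percentile(samples, 0.95))  # 0.30 for this 10-sample series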
@@ -362,14 +362,14 @@ class HistoricalAnalysisService:
return validations

async def _query_prometheus(self, query: str, start_time: datetime, end_time: datetime) -> List[Dict]:
-"""Executar query no Prometheus"""
+"""Execute query in Prometheus"""
try:
async with aiohttp.ClientSession() as session:
params = {
'query': query,
'start': start_time.timestamp(),
'end': end_time.timestamp(),
-'step': '60s'  # 1 minuto de resolução
+'step': '60s'  # 1 minute resolution
}

async with session.get(
@@ -389,9 +389,9 @@ class HistoricalAnalysisService:
return []

async def get_cluster_historical_summary(self, time_range: str = '24h') -> Dict[str, Any]:
-"""Obter resumo histórico do cluster"""
+"""Get cluster historical summary"""
try:
-# Query para CPU total do cluster
+# Query for total cluster CPU
cpu_query = f'''
sum(rate(container_cpu_usage_seconds_total{{
container!="POD",
@@ -399,7 +399,7 @@ class HistoricalAnalysisService:
}}[{time_range}]))
'''

-# Query para memória total do cluster
+# Query for total cluster memory
memory_query = f'''
sum(container_memory_working_set_bytes{{
container!="POD",
@@ -407,7 +407,7 @@ class HistoricalAnalysisService:
}})
'''

-# Query para requests totais
+# Query for total requests
cpu_requests_query = f'''
sum(kube_pod_container_resource_requests{{resource="cpu"}})
'''
@@ -416,7 +416,7 @@ class HistoricalAnalysisService:
sum(kube_pod_container_resource_requests{{resource="memory"}})
'''

-# Executar queries
+# Execute queries
cpu_usage = await self._query_prometheus(cpu_query,
datetime.now() - timedelta(seconds=self.time_ranges[time_range]),
datetime.now())
@@ -181,7 +181,7 @@ class ReportService:
filename = f"cluster_report_{timestamp}.json"
filepath = os.path.join(self.export_path, filename)

-# Converter para dict para serialização
+# Convert to dict for serialization
report_dict = report.dict()

with open(filepath, 'w', encoding='utf-8') as f:
@@ -198,7 +198,7 @@ class ReportService:
with open(filepath, 'w', newline='', encoding='utf-8') as f:
writer = csv.writer(f)

-# Cabeçalho
+# Header
writer.writerow([
"Pod Name", "Namespace", "Container Name",
"Validation Type", "Severity", "Message", "Recommendation"
@@ -234,12 +234,12 @@ class ReportService:
styles = getSampleStyleSheet()
story = []

-# Título
+# Title
title = Paragraph("OpenShift Resource Governance Report", styles['Title'])
story.append(title)
story.append(Spacer(1, 12))

-# Resumo
+# Summary
summary_text = f"""
<b>Cluster Summary:</b><br/>
Total Pods: {report.total_pods}<br/>
@@ -276,7 +276,7 @@ class ReportService:
('GRID', (0, 0), (-1, -1), 1, colors.black)
]))

-story.append(Paragraph("<b>Validações:</b>", styles['Heading2']))
+story.append(Paragraph("<b>Validations:</b>", styles['Heading2']))
story.append(table)

doc.build(story)
@@ -40,10 +40,10 @@ class ValidationService:
time_range: str = '24h'
) -> List[ResourceValidation]:
"""Validate pod resources including historical analysis"""
-# Validações estáticas
+# Static validations
static_validations = self.validate_pod_resources(pod)

-# Análise histórica
+# Historical analysis
try:
historical_validations = await self.historical_analysis.analyze_pod_historical_usage(
pod, time_range
@@ -66,7 +66,7 @@ class ValidationService:
requests = resources.get("requests", {})
limits = resources.get("limits", {})

-# 1. Verificar se requests estão definidos
+# 1. Check if requests are defined
if not requests:
validations.append(ResourceValidation(
pod_name=pod_name,
@@ -78,7 +78,7 @@ class ValidationService:
recommendation="Define CPU and memory requests to guarantee QoS"
))

-# 2. Verificar se limits estão definidos
+# 2. Check if limits are defined
if not limits:
validations.append(ResourceValidation(
pod_name=pod_name,
@@ -213,7 +213,7 @@ class ValidationService:
"""Validate minimum request values"""
validations = []

-# Validar CPU mínima
+# Validate minimum CPU
if "cpu" in requests:
try:
request_value = self._parse_cpu_value(requests["cpu"])
@@ -232,7 +232,7 @@ class ValidationService:
except (ValueError, InvalidOperation):
pass

-# Validar memória mínima
+# Validate minimum memory
if "memory" in requests:
try:
request_value = self._parse_memory_value(requests["memory"])
@@ -254,7 +254,7 @@ class ValidationService:
return validations

def _parse_cpu_value(self, value: str) -> float:
-"""Converter valor de CPU para float (cores)"""
+"""Convert CPU value to float (cores)"""
if value.endswith('m'):
return float(value[:-1]) / 1000
elif value.endswith('n'):
@@ -263,7 +263,7 @@ class ValidationService:
return float(value)

def _parse_memory_value(self, value: str) -> int:
-"""Converter valor de memória para bytes"""
+"""Convert memory value to bytes"""
value = value.upper()

if value.endswith('KI'):
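Standalone, hedged versions of the unit conversions above, covering only the suffixes visible in this diff (m and n for CPU; the binary Ki/Mi/Gi suffixes for memory). The nanocore factor is the standard Kubernetes meaning, assumed here rather than read from the hidden lines:

    def parse_cpu(value: str) -> float:
        if value.endswith("m"):
            return float(value[:-1]) / 1000            # millicores -> cores
        if value.endswith("n"):
            return float(value[:-1]) / 1_000_000_000   # nanocores -> cores (assumed)
        return float(value)

    def parse_memory(value: str) -> int:
        units = {"KI": 1024, "MI": 1024 ** 2, "GI": 1024 ** 3}
        upper = value.upper()
        for suffix, factor in units.items():
            if upper.endswith(suffix):
                return int(float(upper[:-len(suffix)]) * factor)
        return int(float(value))

    print(parse_cpu("500m"))     # 0.5
    print(parse_memory("32Mi"))  # 33554432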
@@ -289,15 +289,15 @@ class ValidationService:
"""Validate overcommit in a namespace"""
validations = []

-# Calcular total de requests do namespace
+# Calculate total namespace requests
total_cpu_requests = self._parse_cpu_value(namespace_resources.total_cpu_requests)
total_memory_requests = self._parse_memory_value(namespace_resources.total_memory_requests)

-# Calcular capacidade total dos nós
+# Calculate total node capacity
total_cpu_capacity = self._parse_cpu_value(node_capacity.get("cpu", "0"))
total_memory_capacity = self._parse_memory_value(node_capacity.get("memory", "0"))

-# Verificar overcommit de CPU
+# Check CPU overcommit
if total_cpu_capacity > 0:
cpu_utilization = (total_cpu_requests / total_cpu_capacity) * 100
if cpu_utilization > 100:
@@ -311,7 +311,7 @@ class ValidationService:
recommendation="Reduce CPU requests or add more nodes to the cluster"
))

-# Verificar overcommit de memória
+# Check memory overcommit
if total_memory_capacity > 0:
memory_utilization = (total_memory_requests / total_memory_capacity) * 100
if memory_utilization > 100:
@@ -331,7 +331,7 @@ class ValidationService:
|
|||||||
"""Generate recommendations based on validations"""
|
"""Generate recommendations based on validations"""
|
||||||
recommendations = []
|
recommendations = []
|
||||||
|
|
||||||
# Agrupar validações por tipo
|
# Group validations by type
|
||||||
validation_counts = {}
|
validation_counts = {}
|
||||||
for validation in validations:
|
for validation in validations:
|
||||||
validation_type = validation.validation_type
|
validation_type = validation.validation_type
|
||||||
@@ -339,7 +339,7 @@ class ValidationService:
|
|||||||
validation_counts[validation_type] = 0
|
validation_counts[validation_type] = 0
|
||||||
validation_counts[validation_type] += 1
|
validation_counts[validation_type] += 1
|
||||||
|
|
||||||
# Gerar recomendações baseadas nos problemas encontrados
|
# Generate recommendations based on found issues
|
||||||
if validation_counts.get("missing_requests", 0) > 0:
|
if validation_counts.get("missing_requests", 0) > 0:
|
||||||
recommendations.append(
|
recommendations.append(
|
||||||
f"Implement LimitRange in namespace to define default requests "
|
f"Implement LimitRange in namespace to define default requests "
|
||||||
|
|||||||
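The hunks above only translate the comments around the overcommit check, which compares the summed namespace requests against total node capacity. As a rough manual cross-check of the same numbers with standard oc commands (the namespace name below is a placeholder, not part of this repository):

    # Per-node requests vs. allocatable capacity, as tracked by the scheduler:
    oc describe nodes | grep -A 8 "Allocated resources"
    # Namespace-level request totals, if a ResourceQuota is defined:
    oc get quota -n my-namespace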
@@ -1,66 +1,66 @@
 #!/bin/bash

-# Script de deploy local para OpenShift
+# Local deployment script for OpenShift
-# Uso: ./deploy-local.sh [TAG_DA_IMAGEM]
+# Usage: ./deploy-local.sh [IMAGE_TAG]

 set -e

-# Configurações
+# Configuration
 IMAGE_NAME="resource-governance"
 REGISTRY="andersonid"
 NAMESPACE="resource-governance"
 TAG=${1:-"latest"}

-echo "🚀 Deploy Local para OpenShift"
+echo "Local Deploy to OpenShift"
-echo "================================"
+echo "========================="
-echo "Imagem: $REGISTRY/$IMAGE_NAME:$TAG"
+echo "Image: $REGISTRY/$IMAGE_NAME:$TAG"
 echo "Namespace: $NAMESPACE"
 echo ""

-# Verificar se está logado no OpenShift
+# Check if logged into OpenShift
 if ! oc whoami > /dev/null 2>&1; then
-    echo "❌ Não está logado no OpenShift. Execute: oc login"
+    echo "ERROR: Not logged into OpenShift. Run: oc login"
     exit 1
 fi

-echo "✅ Logado no OpenShift como: $(oc whoami)"
+echo "SUCCESS: Logged into OpenShift as: $(oc whoami)"
 echo ""

-# Aplicar manifests
+# Apply manifests
-echo "📋 Aplicando manifests..."
+echo "Applying manifests..."
 oc apply -f k8s/namespace.yaml
 oc apply -f k8s/rbac.yaml
 oc apply -f k8s/configmap.yaml

-# Atualizar imagem do deployment
+# Update deployment image
-echo "🔄 Atualizando imagem do deployment..."
+echo "Updating deployment image..."
 oc set image deployment/$IMAGE_NAME $IMAGE_NAME=$REGISTRY/$IMAGE_NAME:$TAG -n $NAMESPACE || true

-# Aplicar deployment, service e route
+# Apply deployment, service and route
-echo "📦 Aplicando deployment, service e route..."
+echo "Applying deployment, service and route..."
 oc apply -f k8s/deployment.yaml
 oc apply -f k8s/service.yaml
 oc apply -f k8s/route.yaml

-# Aguardar rollout
+# Wait for rollout
-echo "⏳ Aguardando rollout..."
+echo "Waiting for rollout..."
 oc rollout status deployment/$IMAGE_NAME -n $NAMESPACE --timeout=300s

-# Verificar deployment
+# Verify deployment
-echo "✅ Verificando deployment..."
+echo "Verifying deployment..."
 oc get deployment $IMAGE_NAME -n $NAMESPACE
 oc get pods -n $NAMESPACE -l app.kubernetes.io/name=$IMAGE_NAME

-# Obter URL da rota
+# Get route URL
 ROUTE_URL=$(oc get route $IMAGE_NAME-route -n $NAMESPACE -o jsonpath='{.spec.host}' 2>/dev/null || echo "")
 if [ -n "$ROUTE_URL" ]; then
     echo ""
-    echo "🚀 Application deployed successfully!"
+    echo "Application deployed successfully!"
-    echo "🌐 URL: https://$ROUTE_URL"
+    echo "URL: https://$ROUTE_URL"
-    echo "📊 Status: oc get pods -n $NAMESPACE -l app.kubernetes.io/name=$IMAGE_NAME"
+    echo "Status: oc get pods -n $NAMESPACE -l app.kubernetes.io/name=$IMAGE_NAME"
 else
-    echo "⚠️ Rota não encontrada. Verifique: oc get routes -n $NAMESPACE"
+    echo "WARNING: Route not found. Check: oc get routes -n $NAMESPACE"
 fi

 echo ""
-echo "✅ Deploy concluído!"
+echo "Deploy completed!"
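A minimal usage sketch for the local deploy script above, based on the usage line in its own header (it assumes an existing oc login session):

    ./deploy-local.sh             # deploys the "latest" tag
    ./deploy-local.sh v1.4.2      # hypothetical tag previously pushed to andersonid/resource-governance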
@@ -1,82 +1,82 @@
 #!/bin/bash

-# Script para deploy da aplicação OpenShift Resource Governance
+# Script for deploying OpenShift Resource Governance application
-# Funciona com qualquer cluster OpenShift (público ou privado)
+# Works with any OpenShift cluster (public or private)

-# Variáveis
+# Variables
 IMAGE_NAME="resource-governance"
 NAMESPACE="resource-governance"
-IMAGE_TAG=${1:-latest} # Usa o primeiro argumento como tag, ou 'latest' por padrão
+IMAGE_TAG=${1:-latest} # Use first argument as tag, or 'latest' by default

-echo "🚀 Deploy para OpenShift Cluster"
+echo "Deploy to OpenShift Cluster"
-echo "================================"
+echo "==========================="
-echo "Imagem: ${IMAGE_TAG}"
+echo "Image: ${IMAGE_TAG}"
 echo "Namespace: ${NAMESPACE}"
 echo ""

-# 1. Verificar login no OpenShift
+# 1. Check OpenShift login
 if ! oc whoami > /dev/null 2>&1; then
-    echo "❌ Não logado no OpenShift. Por favor, faça login com 'oc login'."
+    echo "ERROR: Not logged into OpenShift. Please login with 'oc login'."
-    echo "💡 Exemplo: oc login https://your-cluster.com"
+    echo "Example: oc login https://your-cluster.com"
     exit 1
 fi
-echo "✅ Logado no OpenShift como: $(oc whoami)"
+echo "SUCCESS: Logged into OpenShift as: $(oc whoami)"
 echo ""

-# 2. Verificar se o namespace existe, senão criar
+# 2. Check if namespace exists, create if not
 if ! oc get namespace ${NAMESPACE} > /dev/null 2>&1; then
-    echo "📋 Criando namespace ${NAMESPACE}..."
+    echo "Creating namespace ${NAMESPACE}..."
     oc create namespace ${NAMESPACE}
 else
-    echo "✅ Namespace ${NAMESPACE} já existe"
+    echo "SUCCESS: Namespace ${NAMESPACE} already exists"
 fi
 echo ""

-# 3. Aplicar manifests básicos (rbac, configmap)
+# 3. Apply basic manifests (rbac, configmap)
-echo "📋 Aplicando manifests..."
+echo "Applying manifests..."
 oc apply -f k8s/rbac.yaml
 oc apply -f k8s/configmap.yaml
 echo ""

-# 4. Atualizar deployment com a nova imagem
+# 4. Update deployment with new image
-echo "🔄 Atualizando imagem do deployment..."
+echo "Updating deployment image..."
 oc set image deployment/${IMAGE_NAME} ${IMAGE_NAME}=${IMAGE_TAG} -n ${NAMESPACE} || true
 echo ""

-# 5. Aplicar deployment, service e route
+# 5. Apply deployment, service and route
-echo "📦 Aplicando deployment, service e route..."
+echo "Applying deployment, service and route..."
 oc apply -f k8s/deployment.yaml
 oc apply -f k8s/service.yaml
 oc apply -f k8s/route.yaml
 echo ""

-# 6. Aguardar rollout
+# 6. Wait for rollout
-echo "⏳ Aguardando rollout..."
+echo "Waiting for rollout..."
 oc rollout status deployment/${IMAGE_NAME} -n ${NAMESPACE} --timeout=300s
-echo "✅ Rollout concluído com sucesso!"
+echo "SUCCESS: Rollout completed successfully!"
 echo ""

-# 7. Verificar deployment
+# 7. Verify deployment
-echo "✅ Verificando deployment..."
+echo "Verifying deployment..."
 oc get deployment ${IMAGE_NAME} -n ${NAMESPACE}
 oc get pods -n ${NAMESPACE} -l app.kubernetes.io/name=${IMAGE_NAME}
 echo ""

-# 8. Obter URL da rota
+# 8. Get route URL
 ROUTE_URL=$(oc get route ${IMAGE_NAME}-route -n ${NAMESPACE} -o jsonpath='{.spec.host}' 2>/dev/null || echo "")
 if [ -n "$ROUTE_URL" ]; then
-    echo "🚀 Application deployed successfully!"
+    echo "Application deployed successfully!"
-    echo "🌐 URL: https://$ROUTE_URL"
+    echo "URL: https://$ROUTE_URL"
-    echo "📊 Status: oc get pods -n ${NAMESPACE} -l app.kubernetes.io/name=${IMAGE_NAME}"
+    echo "Status: oc get pods -n ${NAMESPACE} -l app.kubernetes.io/name=${IMAGE_NAME}"
 else
-    echo "⚠️ Rota não encontrada. Verifique se o cluster suporta Routes."
+    echo "WARNING: Route not found. Check if cluster supports Routes."
-    echo "💡 Para acessar localmente: oc port-forward service/${IMAGE_NAME}-service 8080:8080 -n ${NAMESPACE}"
+    echo "For local access: oc port-forward service/${IMAGE_NAME}-service 8080:8080 -n ${NAMESPACE}"
 fi
 echo ""

-echo "✅ Deploy concluído!"
+echo "Deploy completed!"
 echo ""
-echo "🔧 Comandos úteis:"
+echo "Useful commands:"
-echo "   Ver logs: oc logs -f deployment/${IMAGE_NAME} -n ${NAMESPACE}"
+echo "   View logs: oc logs -f deployment/${IMAGE_NAME} -n ${NAMESPACE}"
 echo "   Port-forward: oc port-forward service/${IMAGE_NAME}-service 8080:8080 -n ${NAMESPACE}"
 echo "   Status: oc get pods -n ${NAMESPACE} -l app.kubernetes.io/name=${IMAGE_NAME}"
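When the cluster has no Route support, this script falls back to suggesting a port-forward. A sketch of that fallback, using the service name the script builds from IMAGE_NAME; the /health path is assumed from the curl hint in the other deploy scripts in this commit:

    oc port-forward service/resource-governance-service 8080:8080 -n resource-governance &
    PF_PID=$!
    sleep 3
    curl -s http://localhost:8080/health
    kill $PF_PID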
@@ -1,145 +1,145 @@
 #!/bin/bash

-# Script de deploy com ZERO DOWNTIME (Blue-Green Strategy)
+# Zero downtime deployment script (Blue-Green Strategy)
-# Garante que a aplicação nunca saia do ar durante atualizações
+# Ensures application never goes down during updates

 set -e

-# Configurações
+# Configuration
 IMAGE_NAME="resource-governance"
 REGISTRY="andersonid"
 NAMESPACE="resource-governance"
 TAG=${1:-"latest"}
 FULL_IMAGE="$REGISTRY/$IMAGE_NAME:$TAG"

-echo "🚀 Deploy ZERO DOWNTIME para OpenShift"
+echo "Zero Downtime Deploy to OpenShift"
-echo "======================================"
+echo "================================="
-echo "Imagem: $FULL_IMAGE"
+echo "Image: $FULL_IMAGE"
 echo "Namespace: $NAMESPACE"
-echo "Estratégia: Blue-Green (Zero Downtime)"
+echo "Strategy: Blue-Green (Zero Downtime)"
 echo ""

-# Verificar se está logado no OpenShift
+# Check if logged into OpenShift
 if ! oc whoami > /dev/null 2>&1; then
-    echo "❌ Não está logado no OpenShift. Execute: oc login"
+    echo "ERROR: Not logged into OpenShift. Run: oc login"
     exit 1
 fi

-echo "✅ Logado no OpenShift como: $(oc whoami)"
+echo "SUCCESS: Logged into OpenShift as: $(oc whoami)"
 echo ""

-# Função para verificar se todos os pods estão prontos
+# Function to check if all pods are ready
 check_pods_ready() {
     local deployment=$1
     local namespace=$2
     local timeout=${3:-300}

-    echo "⏳ Aguardando pods do deployment $deployment ficarem prontos..."
+    echo "Waiting for deployment $deployment pods to be ready..."
     oc rollout status deployment/$deployment -n $namespace --timeout=${timeout}s
 }

-# Função para verificar se a aplicação está respondendo
+# Function to check if application is responding
 check_app_health() {
     local service=$1
     local namespace=$2
     local port=${3:-8080}

-    echo "🔍 Verificando saúde da aplicação..."
+    echo "Checking application health..."

-    # Tentar port-forward temporário para testar
+    # Try temporary port-forward for testing
     local temp_pid
     oc port-forward service/$service $port:$port -n $namespace > /dev/null 2>&1 &
     temp_pid=$!

-    # Aguardar port-forward inicializar
+    # Wait for port-forward to initialize
     sleep 3

-    # Testar health check
+    # Test health check
     local health_status
     health_status=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:$port/api/v1/health 2>/dev/null || echo "000")

-    # Parar port-forward temporário
+    # Stop temporary port-forward
     kill $temp_pid 2>/dev/null || true

     if [ "$health_status" = "200" ]; then
-        echo "✅ Aplicação saudável (HTTP $health_status)"
+        echo "SUCCESS: Application healthy (HTTP $health_status)"
         return 0
     else
-        echo "❌ Aplicação não saudável (HTTP $health_status)"
+        echo "ERROR: Application not healthy (HTTP $health_status)"
         return 1
     fi
 }

-# Aplicar manifests básicos
+# Apply basic manifests
-echo "📋 Aplicando manifests básicos..."
+echo "Applying basic manifests..."
 oc apply -f k8s/namespace.yaml
 oc apply -f k8s/rbac.yaml
 oc apply -f k8s/configmap.yaml

-# Verificar se o deployment existe
+# Check if deployment exists
 if oc get deployment $IMAGE_NAME -n $NAMESPACE > /dev/null 2>&1; then
-    echo "🔄 Deployment existente encontrado. Iniciando atualização zero-downtime..."
+    echo "Existing deployment found. Starting zero-downtime update..."

-    # Obter número atual de réplicas
+    # Get current replica count
     CURRENT_REPLICAS=$(oc get deployment $IMAGE_NAME -n $NAMESPACE -o jsonpath='{.spec.replicas}')
-    echo "📊 Réplicas atuais: $CURRENT_REPLICAS"
+    echo "Current replicas: $CURRENT_REPLICAS"

-    # Atualizar imagem do deployment
+    # Update deployment image
-    echo "🔄 Atualizando imagem para: $FULL_IMAGE"
+    echo "Updating image to: $FULL_IMAGE"
     oc set image deployment/$IMAGE_NAME $IMAGE_NAME=$FULL_IMAGE -n $NAMESPACE

-    # Aguardar rollout com timeout maior
+    # Wait for rollout with longer timeout
-    echo "⏳ Aguardando rollout (pode levar alguns minutos)..."
+    echo "Waiting for rollout (may take a few minutes)..."
     if check_pods_ready $IMAGE_NAME $NAMESPACE 600; then
-        echo "✅ Rollout concluído com sucesso!"
+        echo "SUCCESS: Rollout completed successfully!"

-        # Verificar saúde da aplicação
+        # Check application health
         if check_app_health "${IMAGE_NAME}-service" $NAMESPACE; then
-            echo "🎉 Deploy zero-downtime concluído com sucesso!"
+            echo "Zero downtime deploy completed successfully!"
         else
-            echo "⚠️ Deploy concluído, mas aplicação pode não estar saudável"
+            echo "WARNING: Deploy completed, but application may not be healthy"
-            echo "🔍 Verifique os logs: oc logs -f deployment/$IMAGE_NAME -n $NAMESPACE"
+            echo "Check logs: oc logs -f deployment/$IMAGE_NAME -n $NAMESPACE"
         fi
     else
-        echo "❌ Rollout falhou ou timeout"
+        echo "ERROR: Rollout failed or timeout"
-        echo "🔍 Verificando status dos pods:"
+        echo "Checking pod status:"
         oc get pods -n $NAMESPACE -l app.kubernetes.io/name=$IMAGE_NAME
         exit 1
     fi
 else
-    echo "🆕 Deployment não existe. Criando novo deployment..."
+    echo "Deployment does not exist. Creating new deployment..."
     oc apply -f k8s/deployment.yaml
     oc apply -f k8s/service.yaml
     oc apply -f k8s/route.yaml

-    # Aguardar pods ficarem prontos
+    # Wait for pods to be ready
     if check_pods_ready $IMAGE_NAME $NAMESPACE 300; then
-        echo "✅ Novo deployment criado com sucesso!"
+        echo "SUCCESS: New deployment created successfully!"
     else
-        echo "❌ Falha ao criar deployment"
+        echo "ERROR: Failed to create deployment"
         exit 1
     fi
 fi

-# Verificar status final
+# Check final status
 echo ""
-echo "📊 STATUS FINAL:"
+echo "FINAL STATUS:"
-echo "================"
+echo "============="
 oc get deployment $IMAGE_NAME -n $NAMESPACE
 echo ""
 oc get pods -n $NAMESPACE -l app.kubernetes.io/name=$IMAGE_NAME
 echo ""

-# Obter URL da rota
+# Get route URL
 ROUTE_URL=$(oc get route $IMAGE_NAME-route -n $NAMESPACE -o jsonpath='{.spec.host}' 2>/dev/null || echo "")
 if [ -n "$ROUTE_URL" ]; then
-    echo "🌐 URLs de acesso:"
+    echo "Access URLs:"
     echo "   OpenShift: https://$ROUTE_URL"
-    echo "   Port-forward: http://localhost:8080 (se ativo)"
+    echo "   Port-forward: http://localhost:8080 (if active)"
     echo ""
-    echo "💡 Para iniciar port-forward: oc port-forward service/${IMAGE_NAME}-service 8080:8080 -n $NAMESPACE"
+    echo "To start port-forward: oc port-forward service/${IMAGE_NAME}-service 8080:8080 -n $NAMESPACE"
 fi

 echo ""
-echo "✅ Deploy zero-downtime concluído!"
+echo "Zero downtime deploy completed!"
-echo "🔄 Estratégia: Rolling Update com maxUnavailable=0 (zero downtime)"
+echo "Strategy: Rolling Update with maxUnavailable=0 (zero downtime)"
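The closing message credits zero downtime to a rolling update with maxUnavailable=0; that strategy would live in k8s/deployment.yaml rather than in this script. A hedged way to confirm it on a live cluster:

    oc get deployment resource-governance -n resource-governance \
      -o jsonpath='{.spec.strategy.rollingUpdate}{"\n"}'
    # Should report maxUnavailable=0 if the manifest matches the script's claim.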
@@ -1,95 +1,95 @@
 #!/bin/bash

-# Script de deploy para OpenShift usando GitHub
+# Deploy script for OpenShift using GitHub
 set -e

-# Cores para output
+# Colors for output
 RED='\033[0;31m'
 GREEN='\033[0;32m'
 YELLOW='\033[1;33m'
 BLUE='\033[0;34m'
 NC='\033[0m' # No Color

-# Configurações
+# Configuration
 REPO_URL="https://github.com/andersonid/openshift-resource-governance.git"
 IMAGE_NAME="resource-governance"
 REGISTRY="andersonid"
 TAG="${1:-latest}"
 NAMESPACE="resource-governance"

-echo -e "${BLUE}🚀 Deploying OpenShift Resource Governance Tool from GitHub${NC}"
+echo -e "${BLUE}Deploying OpenShift Resource Governance Tool from GitHub${NC}"
 echo -e "${BLUE}Repository: ${REPO_URL}${NC}"
 echo -e "${BLUE}Image: ${REGISTRY}/${IMAGE_NAME}:${TAG}${NC}"

-# Verificar se oc está instalado
+# Check if oc is installed
 if ! command -v oc &> /dev/null; then
-    echo -e "${RED}❌ OpenShift CLI (oc) não está instalado.${NC}"
+    echo -e "${RED}ERROR: OpenShift CLI (oc) is not installed.${NC}"
-    echo -e "${YELLOW}Instale o oc CLI: https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html${NC}"
+    echo -e "${YELLOW}Install oc CLI: https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html${NC}"
     exit 1
 fi

-# Verificar se está logado no OpenShift
+# Check if logged into OpenShift
 if ! oc whoami &> /dev/null; then
-    echo -e "${RED}❌ Não está logado no OpenShift.${NC}"
+    echo -e "${RED}ERROR: Not logged into OpenShift.${NC}"
-    echo -e "${YELLOW}Faça login com: oc login <cluster-url>${NC}"
+    echo -e "${YELLOW}Login with: oc login <cluster-url>${NC}"
     exit 1
 fi

-echo -e "${GREEN}✅ Logado como: $(oc whoami)${NC}"
+echo -e "${GREEN}SUCCESS: Logged in as: $(oc whoami)${NC}"

-# Criar namespace se não existir
+# Create namespace if it doesn't exist
-echo -e "${YELLOW}📁 Creating namespace...${NC}"
+echo -e "${YELLOW}Creating namespace...${NC}"
 oc apply -f k8s/namespace.yaml

-# Aplicar RBAC
+# Apply RBAC
-echo -e "${YELLOW}🔐 Applying RBAC...${NC}"
+echo -e "${YELLOW}Applying RBAC...${NC}"
 oc apply -f k8s/rbac.yaml

-# Aplicar ConfigMap
+# Apply ConfigMap
-echo -e "${YELLOW}⚙️ Applying ConfigMap...${NC}"
+echo -e "${YELLOW}Applying ConfigMap...${NC}"
 oc apply -f k8s/configmap.yaml

-# Atualizar imagem no DaemonSet
+# Update image in DaemonSet
-echo -e "${YELLOW}🔄 Updating image in DaemonSet...${NC}"
+echo -e "${YELLOW}Updating image in DaemonSet...${NC}"
 oc set image daemonset/${IMAGE_NAME} ${IMAGE_NAME}="${REGISTRY}/${IMAGE_NAME}:${TAG}" -n "${NAMESPACE}" || true

-# Aplicar DaemonSet
+# Apply DaemonSet
-echo -e "${YELLOW}📦 Applying DaemonSet...${NC}"
+echo -e "${YELLOW}Applying DaemonSet...${NC}"
 oc apply -f k8s/daemonset.yaml

-# Aplicar Service
+# Apply Service
-echo -e "${YELLOW}🌐 Applying Service...${NC}"
+echo -e "${YELLOW}Applying Service...${NC}"
 oc apply -f k8s/service.yaml

-# Aplicar Route
+# Apply Route
-echo -e "${YELLOW}🛣️ Applying Route...${NC}"
+echo -e "${YELLOW}Applying Route...${NC}"
 oc apply -f k8s/route.yaml

-# Aguardar pods ficarem prontos
+# Wait for pods to be ready
-echo -e "${YELLOW}⏳ Waiting for pods to be ready...${NC}"
+echo -e "${YELLOW}Waiting for pods to be ready...${NC}"
 oc wait --for=condition=ready pod -l app.kubernetes.io/name=${IMAGE_NAME} -n "${NAMESPACE}" --timeout=300s

-# Obter URL da rota
+# Get route URL
 ROUTE_URL=$(oc get route ${IMAGE_NAME}-route -n "${NAMESPACE}" -o jsonpath='{.spec.host}')
 if [ -n "${ROUTE_URL}" ]; then
-    echo -e "${GREEN}🎉 Deploy completed successfully!${NC}"
+    echo -e "${GREEN}SUCCESS: Deploy completed successfully!${NC}"
-    echo -e "${BLUE}🌐 Application URL: https://${ROUTE_URL}${NC}"
+    echo -e "${BLUE}Application URL: https://${ROUTE_URL}${NC}"
-    echo -e "${BLUE}📊 GitHub Repository: ${REPO_URL}${NC}"
+    echo -e "${BLUE}GitHub Repository: ${REPO_URL}${NC}"
 else
-    echo -e "${YELLOW}⚠️ Deploy completed, but route URL not found.${NC}"
+    echo -e "${YELLOW}WARNING: Deploy completed, but route URL not found.${NC}"
     echo -e "${BLUE}Check with: oc get routes -n ${NAMESPACE}${NC}"
 fi

-# Mostrar status
+# Show status
-echo -e "${BLUE}📊 Deployment status:${NC}"
+echo -e "${BLUE}Deployment status:${NC}"
 oc get all -n "${NAMESPACE}"

-echo -e "${BLUE}🔍 To check logs:${NC}"
+echo -e "${BLUE}To check logs:${NC}"
 echo -e "   oc logs -f daemonset/${IMAGE_NAME} -n ${NAMESPACE}"

-echo -e "${BLUE}🧪 To test health:${NC}"
+echo -e "${BLUE}To test health:${NC}"
 echo -e "   curl https://${ROUTE_URL}/health"

-echo -e "${BLUE}📝 To update from GitHub:${NC}"
+echo -e "${BLUE}To update from GitHub:${NC}"
 echo -e "   git pull origin main"
 echo -e "   ./openshift-deploy.sh <new-tag>"
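Usage sketch for the GitHub-based deploy script, following the update hint it prints at the end (the tag is illustrative):

    git pull origin main
    ./openshift-deploy.sh v1.0.3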
@@ -1,117 +1,117 @@
 #!/bin/bash

-# Script para deploy automático após GitHub Actions
+# Auto-deploy script after GitHub Actions
-# Este script pode ser executado localmente ou via webhook
+# This script can be executed locally or via webhook

 set -e

-# Cores para output
+# Colors for output
 RED='\033[0;31m'
 GREEN='\033[0;32m'
 YELLOW='\033[1;33m'
 BLUE='\033[0;34m'
 NC='\033[0m' # No Color

-# Configurações
+# Configuration
 IMAGE_NAME="resource-governance"
 REGISTRY="andersonid"
 NAMESPACE="resource-governance"
 IMAGE_TAG=${1:-latest}

-echo -e "${BLUE}🚀 Auto-Deploy para OpenShift${NC}"
+echo -e "${BLUE}Auto-Deploy to OpenShift${NC}"
 echo "================================"
-echo "Imagem: ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"
+echo "Image: ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"
 echo "Namespace: ${NAMESPACE}"
 echo ""

-# 1. Verificar login no OpenShift
+# 1. Check OpenShift login
 if ! oc whoami > /dev/null 2>&1; then
-    echo -e "${RED}❌ Não logado no OpenShift. Por favor, faça login com 'oc login'.${NC}"
+    echo -e "${RED}ERROR: Not logged into OpenShift. Please login with 'oc login'.${NC}"
     exit 1
 fi
-echo -e "${GREEN}✅ Logado no OpenShift como: $(oc whoami)${NC}"
+echo -e "${GREEN}SUCCESS: Logged into OpenShift as: $(oc whoami)${NC}"
 echo ""

-# 2. Verificar se a imagem existe no Docker Hub
+# 2. Check if image exists on Docker Hub
-echo -e "${BLUE}🔍 Verificando imagem no Docker Hub...${NC}"
+echo -e "${BLUE}Checking image on Docker Hub...${NC}"
 if ! skopeo inspect docker://${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG} > /dev/null 2>&1; then
-    echo -e "${RED}❌ Imagem ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG} não encontrada no Docker Hub!${NC}"
+    echo -e "${RED}ERROR: Image ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG} not found on Docker Hub!${NC}"
     exit 1
 fi
-echo -e "${GREEN}✅ Imagem encontrada no Docker Hub${NC}"
+echo -e "${GREEN}SUCCESS: Image found on Docker Hub${NC}"
 echo ""

-# 3. Verificar se o namespace existe
+# 3. Check if namespace exists
 if ! oc get namespace ${NAMESPACE} > /dev/null 2>&1; then
-    echo -e "${BLUE}📋 Criando namespace ${NAMESPACE}...${NC}"
+    echo -e "${BLUE}Creating namespace ${NAMESPACE}...${NC}"
     oc create namespace ${NAMESPACE}
 else
-    echo -e "${GREEN}✅ Namespace ${NAMESPACE} já existe${NC}"
+    echo -e "${GREEN}SUCCESS: Namespace ${NAMESPACE} already exists${NC}"
 fi
 echo ""

-# 4. Aplicar manifests básicos
+# 4. Apply basic manifests
-echo -e "${BLUE}📋 Aplicando manifests básicos...${NC}"
+echo -e "${BLUE}Applying basic manifests...${NC}"
 oc apply -f k8s/rbac.yaml -n ${NAMESPACE}
 oc apply -f k8s/configmap.yaml -n ${NAMESPACE}
 echo ""

-# 5. Verificar se o deployment existe
+# 5. Check if deployment exists
 if oc get deployment ${IMAGE_NAME} -n ${NAMESPACE} > /dev/null 2>&1; then
-    echo -e "${BLUE}🔄 Deployment existente encontrado. Iniciando atualização...${NC}"
+    echo -e "${BLUE}Existing deployment found. Starting update...${NC}"

-    # Obter imagem atual
+    # Get current image
     CURRENT_IMAGE=$(oc get deployment ${IMAGE_NAME} -n ${NAMESPACE} -o jsonpath='{.spec.template.spec.containers[0].image}')
-    echo "Imagem atual: ${CURRENT_IMAGE}"
+    echo "Current image: ${CURRENT_IMAGE}"
-    echo "Nova imagem: ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"
+    echo "New image: ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"

-    # Verificar se a imagem mudou
+    # Check if image changed
     if [ "${CURRENT_IMAGE}" = "${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}" ]; then
-        echo -e "${YELLOW}⚠️ Imagem já está atualizada. Nenhuma ação necessária.${NC}"
+        echo -e "${YELLOW}WARNING: Image already up to date. No action needed.${NC}"
         exit 0
     fi

-    # Atualizar deployment com nova imagem
+    # Update deployment with new image
-    echo -e "${BLUE}🔄 Atualizando imagem do deployment...${NC}"
+    echo -e "${BLUE}Updating deployment image...${NC}"
     oc set image deployment/${IMAGE_NAME} ${IMAGE_NAME}=${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG} -n ${NAMESPACE}

-    # Aguardar rollout
+    # Wait for rollout
-    echo -e "${BLUE}⏳ Aguardando rollout (pode levar alguns minutos)...${NC}"
+    echo -e "${BLUE}Waiting for rollout (may take a few minutes)...${NC}"
     oc rollout status deployment/${IMAGE_NAME} -n ${NAMESPACE} --timeout=300s
-    echo -e "${GREEN}✅ Rollout concluído com sucesso!${NC}"
+    echo -e "${GREEN}SUCCESS: Rollout completed successfully!${NC}"

 else
-    echo -e "${BLUE}📦 Deployment não encontrado. Criando novo deployment...${NC}"
+    echo -e "${BLUE}Deployment not found. Creating new deployment...${NC}"
-    # Aplicar deployment, service e route
+    # Apply deployment, service and route
     oc apply -f k8s/deployment.yaml -n ${NAMESPACE}
     oc apply -f k8s/service.yaml -n ${NAMESPACE}
     oc apply -f k8s/route.yaml -n ${NAMESPACE}

-    # Aguardar rollout inicial
+    # Wait for initial rollout
-    echo -e "${BLUE}⏳ Aguardando rollout inicial...${NC}"
+    echo -e "${BLUE}Waiting for initial rollout...${NC}"
     oc rollout status deployment/${IMAGE_NAME} -n ${NAMESPACE} --timeout=300s
-    echo -e "${GREEN}✅ Rollout inicial concluído com sucesso!${NC}"
+    echo -e "${GREEN}SUCCESS: Initial rollout completed successfully!${NC}"
 fi
 echo ""

-# 6. Verificar status final
+# 6. Check final status
-echo -e "${BLUE}📊 STATUS FINAL:${NC}"
+echo -e "${BLUE}FINAL STATUS:${NC}"
 echo "================"
 oc get deployment ${IMAGE_NAME} -n ${NAMESPACE}
 echo ""
 oc get pods -n ${NAMESPACE} -l app.kubernetes.io/name=${IMAGE_NAME}
 echo ""

-# 7. Obter URLs de acesso
+# 7. Get access URLs
 ROUTE_URL=$(oc get route ${IMAGE_NAME}-route -n ${NAMESPACE} -o jsonpath='{.spec.host}' 2>/dev/null || echo "")
-echo -e "${BLUE}🌐 URLs de acesso:${NC}"
+echo -e "${BLUE}Access URLs:${NC}"
 if [ -n "$ROUTE_URL" ]; then
     echo "   OpenShift: https://$ROUTE_URL"
 else
-    echo "   OpenShift: Rota não encontrada ou não disponível."
+    echo "   OpenShift: Route not found or not available."
 fi
-echo "   Port-forward: http://localhost:8080 (se ativo)"
+echo "   Port-forward: http://localhost:8080 (if active)"
 echo ""

-echo -e "${GREEN}✅ Auto-deploy concluído com sucesso!${NC}"
+echo -e "${GREEN}SUCCESS: Auto-deploy completed successfully!${NC}"
-echo -e "${BLUE}🔄 Estratégia: Rolling Update com maxUnavailable=0 (zero downtime)${NC}"
+echo -e "${BLUE}Strategy: Rolling Update with maxUnavailable=0 (zero downtime)${NC}"
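The auto-deploy script gates on the image being present on Docker Hub via skopeo. The same pre-flight check can be run by hand before invoking it:

    skopeo inspect docker://andersonid/resource-governance:latest | head -n 20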
@@ -1,57 +1,57 @@
 #!/bin/bash

-# Script de build para OpenShift Resource Governance Tool
+# Build script for OpenShift Resource Governance Tool
 set -e

-# Cores para output
+# Colors for output
 RED='\033[0;31m'
 GREEN='\033[0;32m'
 YELLOW='\033[1;33m'
 BLUE='\033[0;34m'
 NC='\033[0m' # No Color

-# Configurações
+# Configuration
 IMAGE_NAME="resource-governance"
 TAG="${1:-latest}"
 REGISTRY="${2:-andersonid}"
 FULL_IMAGE_NAME="${REGISTRY}/${IMAGE_NAME}:${TAG}"

-echo -e "${BLUE}🚀 Building OpenShift Resource Governance Tool${NC}"
+echo -e "${BLUE}Building OpenShift Resource Governance Tool${NC}"
 echo -e "${BLUE}Image: ${FULL_IMAGE_NAME}${NC}"

-# Verificar se Podman está instalado
+# Check if Podman is installed
 if ! command -v podman &> /dev/null; then
-    echo -e "${RED}❌ Podman não está instalado. Instale o Podman e tente novamente.${NC}"
+    echo -e "${RED}ERROR: Podman is not installed. Install Podman and try again.${NC}"
     exit 1
 fi

-# Build da imagem
+# Build image
-echo -e "${YELLOW}📦 Building container image with Podman...${NC}"
+echo -e "${YELLOW}Building container image with Podman...${NC}"
 podman build -t "${FULL_IMAGE_NAME}" .

 if [ $? -eq 0 ]; then
-    echo -e "${GREEN}✅ Image built successfully!${NC}"
+    echo -e "${GREEN}SUCCESS: Image built successfully!${NC}"
 else
-    echo -e "${RED}❌ Build failed!${NC}"
+    echo -e "${RED}ERROR: Build failed!${NC}"
     exit 1
 fi

-# Testar a imagem
+# Test image
-echo -e "${YELLOW}🧪 Testing image...${NC}"
+echo -e "${YELLOW}Testing image...${NC}"
-podman run --rm "${FULL_IMAGE_NAME}" python -c "import app.main; print('✅ App imports successfully')"
+podman run --rm "${FULL_IMAGE_NAME}" python -c "import app.main; print('SUCCESS: App imports successfully')"

 if [ $? -eq 0 ]; then
-    echo -e "${GREEN}✅ Image test passed!${NC}"
+    echo -e "${GREEN}SUCCESS: Image test passed!${NC}"
 else
-    echo -e "${RED}❌ Image test failed!${NC}"
+    echo -e "${RED}ERROR: Image test failed!${NC}"
     exit 1
 fi

-# Mostrar informações da imagem
+# Show image information
-echo -e "${BLUE}📊 Image information:${NC}"
+echo -e "${BLUE}Image information:${NC}"
 podman images "${FULL_IMAGE_NAME}"

-echo -e "${GREEN}🎉 Build completed successfully!${NC}"
+echo -e "${GREEN}SUCCESS: Build completed successfully!${NC}"
 echo -e "${BLUE}To push to registry:${NC}"
 echo -e "   podman push ${FULL_IMAGE_NAME}"
 echo -e "${BLUE}To run locally:${NC}"
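Usage sketch, assuming the build script is saved as build.sh (the file name is not shown in this diff); TAG and REGISTRY are its two positional arguments:

    ./build.sh                      # builds andersonid/resource-governance:latest
    ./build.sh v1.2.0 myregistry    # hypothetical custom tag and registry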
@@ -1,90 +1,90 @@
 #!/bin/bash

-# Script de deploy para OpenShift Resource Governance Tool
+# Deploy script for OpenShift Resource Governance Tool
 set -e

-# Cores para output
+# Colors for output
 RED='\033[0;31m'
 GREEN='\033[0;32m'
 YELLOW='\033[1;33m'
 BLUE='\033[0;34m'
 NC='\033[0m' # No Color

-# Configurações
+# Configuration
 NAMESPACE="resource-governance"
 IMAGE_NAME="resource-governance"
 TAG="${1:-latest}"
 REGISTRY="${2:-andersonid}"
 FULL_IMAGE_NAME="${REGISTRY}/${IMAGE_NAME}:${TAG}"

-echo -e "${BLUE}🚀 Deploying OpenShift Resource Governance Tool${NC}"
+echo -e "${BLUE}Deploying OpenShift Resource Governance Tool${NC}"
 echo -e "${BLUE}Namespace: ${NAMESPACE}${NC}"
 echo -e "${BLUE}Image: ${FULL_IMAGE_NAME}${NC}"

-# Verificar se oc está instalado
+# Check if oc is installed
 if ! command -v oc &> /dev/null; then
-    echo -e "${RED}❌ OpenShift CLI (oc) não está instalado.${NC}"
+    echo -e "${RED}ERROR: OpenShift CLI (oc) is not installed.${NC}"
-    echo -e "${YELLOW}Instale o oc CLI: https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html${NC}"
+    echo -e "${YELLOW}Install oc CLI: https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html${NC}"
     exit 1
 fi

-# Verificar se está logado no OpenShift
+# Check if logged into OpenShift
 if ! oc whoami &> /dev/null; then
-    echo -e "${RED}❌ Não está logado no OpenShift.${NC}"
+    echo -e "${RED}ERROR: Not logged into OpenShift.${NC}"
-    echo -e "${YELLOW}Faça login com: oc login <cluster-url>${NC}"
+    echo -e "${YELLOW}Login with: oc login <cluster-url>${NC}"
     exit 1
 fi

-echo -e "${GREEN}✅ Logado como: $(oc whoami)${NC}"
+echo -e "${GREEN}SUCCESS: Logged in as: $(oc whoami)${NC}"

-# Criar namespace se não existir
+# Create namespace if it doesn't exist
-echo -e "${YELLOW}📁 Creating namespace...${NC}"
+echo -e "${YELLOW}Creating namespace...${NC}"
 oc apply -f k8s/namespace.yaml

-# Aplicar RBAC
+# Apply RBAC
-echo -e "${YELLOW}🔐 Applying RBAC...${NC}"
+echo -e "${YELLOW}Applying RBAC...${NC}"
 oc apply -f k8s/rbac.yaml

-# Aplicar ConfigMap
+# Apply ConfigMap
-echo -e "${YELLOW}⚙️ Applying ConfigMap...${NC}"
+echo -e "${YELLOW}Applying ConfigMap...${NC}"
 oc apply -f k8s/configmap.yaml

-# Atualizar imagem no DaemonSet
+# Update image in DaemonSet
-echo -e "${YELLOW}🔄 Updating image in DaemonSet...${NC}"
+echo -e "${YELLOW}Updating image in DaemonSet...${NC}"
 oc set image daemonset/resource-governance resource-governance="${FULL_IMAGE_NAME}" -n "${NAMESPACE}"

-# Aplicar DaemonSet
+# Apply DaemonSet
-echo -e "${YELLOW}📦 Applying DaemonSet...${NC}"
+echo -e "${YELLOW}Applying DaemonSet...${NC}"
 oc apply -f k8s/daemonset.yaml

-# Aplicar Service
+# Apply Service
-echo -e "${YELLOW}🌐 Applying Service...${NC}"
+echo -e "${YELLOW}Applying Service...${NC}"
 oc apply -f k8s/service.yaml

-# Aplicar Route
+# Apply Route
-echo -e "${YELLOW}🛣️ Applying Route...${NC}"
+echo -e "${YELLOW}Applying Route...${NC}"
 oc apply -f k8s/route.yaml

-# Aguardar pods ficarem prontos
+# Wait for pods to be ready
-echo -e "${YELLOW}⏳ Waiting for pods to be ready...${NC}"
+echo -e "${YELLOW}Waiting for pods to be ready...${NC}"
 oc wait --for=condition=ready pod -l app.kubernetes.io/name=resource-governance -n "${NAMESPACE}" --timeout=300s

-# Obter URL da rota
+# Get route URL
 ROUTE_URL=$(oc get route resource-governance-route -n "${NAMESPACE}" -o jsonpath='{.spec.host}')
 if [ -n "${ROUTE_URL}" ]; then
-    echo -e "${GREEN}🎉 Deploy completed successfully!${NC}"
+    echo -e "${GREEN}SUCCESS: Deploy completed successfully!${NC}"
-    echo -e "${BLUE}🌐 Application URL: https://${ROUTE_URL}${NC}"
+    echo -e "${BLUE}Application URL: https://${ROUTE_URL}${NC}"
 else
-    echo -e "${YELLOW}⚠️ Deploy completed, but route URL not found.${NC}"
+    echo -e "${YELLOW}WARNING: Deploy completed, but route URL not found.${NC}"
     echo -e "${BLUE}Check with: oc get routes -n ${NAMESPACE}${NC}"
 fi

-# Mostrar status
+# Show status
-echo -e "${BLUE}📊 Deployment status:${NC}"
+echo -e "${BLUE}Deployment status:${NC}"
 oc get all -n "${NAMESPACE}"

-echo -e "${BLUE}🔍 To check logs:${NC}"
+echo -e "${BLUE}To check logs:${NC}"
 echo -e "   oc logs -f daemonset/resource-governance -n ${NAMESPACE}"

-echo -e "${BLUE}🧪 To test health:${NC}"
+echo -e "${BLUE}To test health:${NC}"
 echo -e "   curl https://${ROUTE_URL}/health"
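This variant manages a DaemonSet rather than a Deployment, so readiness is per node. A quick way to compare desired and ready pod counts, using standard oc output options (not specific to this repository):

    oc get daemonset resource-governance -n resource-governance \
      -o custom-columns=DESIRED:.status.desiredNumberScheduled,READY:.status.numberReady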
@@ -1,46 +1,46 @@
 #!/bin/bash

-# Script para criar releases e tags do OpenShift Resource Governance
+# Script to create releases and tags for OpenShift Resource Governance

 set -e

-# Cores para output
+# Colors for output
 RED='\033[0;31m'
 GREEN='\033[0;32m'
 YELLOW='\033[1;33m'
 BLUE='\033[0;34m'
 NC='\033[0m' # No Color

-# Função para mostrar ajuda
+# Function to show help
 show_help() {
-    echo "🚀 OpenShift Resource Governance - Release Script"
+    echo "OpenShift Resource Governance - Release Script"
-    echo "=================================================="
+    echo "=============================================="
     echo ""
-    echo "Uso: $0 [COMANDO] [VERSÃO]"
+    echo "Usage: $0 [COMMAND] [VERSION]"
     echo ""
-    echo "Comandos:"
+    echo "Commands:"
-    echo "  patch   Criar release patch (ex: 1.0.0 -> 1.0.1)"
+    echo "  patch   Create patch release (ex: 1.0.0 -> 1.0.1)"
-    echo "  minor   Criar release minor (ex: 1.0.0 -> 1.1.0)"
+    echo "  minor   Create minor release (ex: 1.0.0 -> 1.1.0)"
-    echo "  major   Criar release major (ex: 1.0.0 -> 2.0.0)"
+    echo "  major   Create major release (ex: 1.0.0 -> 2.0.0)"
-    echo "  custom  Criar release com versão customizada"
+    echo "  custom  Create release with custom version"
-    echo "  list    Listar releases existentes"
+    echo "  list    List existing releases"
-    echo "  help    Mostrar esta ajuda"
+    echo "  help    Show this help"
     echo ""
-    echo "Exemplos:"
+    echo "Examples:"
     echo "  $0 patch      # 1.0.0 -> 1.0.1"
     echo "  $0 minor      # 1.0.0 -> 1.1.0"
-    echo "  $0 custom 2.0.0-beta.1  # Versão customizada"
+    echo "  $0 custom 2.0.0-beta.1  # Custom version"
-    echo "  $0 list       # Listar releases"
+    echo "  $0 list       # List releases"
     echo ""
 }

-# Função para obter a versão atual
+# Function to get current version
 get_current_version() {
     local latest_tag=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
-    echo "${latest_tag#v}"  # Remove o 'v' do início
+    echo "${latest_tag#v}"  # Remove 'v' prefix
 }

-# Função para incrementar versão
+# Function to increment version
 increment_version() {
     local version=$1
     local type=$2
@@ -66,78 +66,78 @@ increment_version() {
     esac
 }

-# Função para validar versão
+# Function to validate version
 validate_version() {
     local version=$1
     if [[ ! $version =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?$ ]]; then
-        echo -e "${RED}❌ Versão inválida: $version${NC}"
+        echo -e "${RED}ERROR: Invalid version: $version${NC}"
-        echo "Formato esperado: X.Y.Z ou X.Y.Z-suffix"
+        echo "Expected format: X.Y.Z or X.Y.Z-suffix"
         exit 1
     fi
 }

-# Função para criar release
+# Function to create release
 create_release() {
     local version=$1
     local tag="v$version"

-    echo -e "${BLUE}🚀 Criando release $tag${NC}"
+    echo -e "${BLUE}Creating release $tag${NC}"
     echo ""

-    # Verificar se já existe
+    # Check if already exists
     if git tag -l | grep -q "^$tag$"; then
-        echo -e "${RED}❌ Tag $tag já existe!${NC}"
+        echo -e "${RED}ERROR: Tag $tag already exists!${NC}"
         exit 1
     fi

-    # Verificar se há mudanças não commitadas
+    # Check for uncommitted changes
     if ! git diff-index --quiet HEAD --; then
-        echo -e "${YELLOW}⚠️ Há mudanças não commitadas. Deseja continuar? (y/N)${NC}"
+        echo -e "${YELLOW}WARNING: There are uncommitted changes. Continue? (y/N)${NC}"
         read -r response
         if [[ ! "$response" =~ ^[Yy]$ ]]; then
-            echo "Cancelado."
+            echo "Cancelled."
             exit 1
         fi
     fi

-    # Fazer commit das mudanças se houver
+    # Commit changes if any
     if ! git diff-index --quiet HEAD --; then
-        echo -e "${BLUE}📝 Fazendo commit das mudanças...${NC}"
+        echo -e "${BLUE}Committing changes...${NC}"
         git add .
         git commit -m "Release $tag"
     fi

-    # Criar tag
+    # Create tag
-    echo -e "${BLUE}🏷️ Criando tag $tag...${NC}"
+    echo -e "${BLUE}Creating tag $tag...${NC}"
     git tag -a "$tag" -m "Release $tag"

-    # Push da tag
+    # Push tag
-    echo -e "${BLUE}📤 Fazendo push da tag...${NC}"
+    echo -e "${BLUE}Pushing tag...${NC}"
     git push origin "$tag"

     echo ""
-    echo -e "${GREEN}✅ Release $tag criado com sucesso!${NC}"
+    echo -e "${GREEN}SUCCESS: Release $tag created successfully!${NC}"
     echo ""
-    echo "🔗 Links úteis:"
+    echo "Useful links:"
     echo "   GitHub: https://github.com/andersonid/openshift-resource-governance/releases/tag/$tag"
     echo "   Docker Hub: https://hub.docker.com/r/andersonid/resource-governance/tags"
     echo ""
-    echo "🚀 O GitHub Actions irá automaticamente:"
+    echo "GitHub Actions will automatically:"
-    echo "   1. Buildar a imagem Docker"
+    echo "   1. Build Docker image"
-    echo "   2. Fazer push para Docker Hub"
+    echo "   2. Push to Docker Hub"
-    echo "   3. Criar release no GitHub"
+    echo "   3. Create GitHub release"
     echo ""
-    echo "⏳ Aguarde alguns minutos e verifique:"
+    echo "Wait a few minutes and check:"
     echo "   gh run list --repo andersonid/openshift-resource-governance --workflow='build-only.yml'"
 }

-# Função para listar releases
+# Function to list releases
 list_releases() {
-    echo -e "${BLUE}📋 Releases existentes:${NC}"
+    echo -e "${BLUE}Existing releases:${NC}"
     echo ""
     git tag -l --sort=-version:refname | head -10
     echo ""
-    echo "💡 Para ver todos: git tag -l --sort=-version:refname"
+    echo "To see all: git tag -l --sort=-version:refname"
 }

 # Main
@@ -162,8 +162,8 @@ case "${1:-help}" in
         ;;
     "custom")
         if [ -z "$2" ]; then
-            echo -e "${RED}❌ Versão customizada não fornecida!${NC}"
+            echo -e "${RED}ERROR: Custom version not provided!${NC}"
-            echo "Uso: $0 custom 2.0.0-beta.1"
+            echo "Usage: $0 custom 2.0.0-beta.1"
             exit 1
         fi
         validate_version "$2"
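A typical flow sketched by the script's own help text, assuming it is saved as release.sh (the file name is not shown in this diff):

    ./release.sh list      # show the most recent tags
    ./release.sh patch     # e.g. v1.0.0 -> v1.0.1: tags the release and pushes the tag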
@@ -1,7 +1,7 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
"""
|
"""
|
||||||
Webhook para deploy automático após GitHub Actions
|
Webhook for automatic deployment after GitHub Actions
|
||||||
Este script pode ser executado como um serviço para detectar mudanças no Docker Hub
|
This script can be run as a service to detect changes on Docker Hub
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import os
|
import os
|
||||||
@@ -11,13 +11,13 @@ import logging
 from flask import Flask, request, jsonify
 from datetime import datetime

-# Configuração do logging
+# Logging configuration
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)

 app = Flask(__name__)

-# Configurações
+# Configuration
 IMAGE_NAME = os.getenv('IMAGE_NAME', 'resource-governance')
 REGISTRY = os.getenv('REGISTRY', 'andersonid')
 NAMESPACE = os.getenv('NAMESPACE', 'resource-governance')
@@ -25,100 +25,100 @@ SCRIPT_PATH = os.getenv('AUTO_DEPLOY_SCRIPT', './scripts/auto-deploy.sh')

 @app.route('/webhook/dockerhub', methods=['POST'])
 def dockerhub_webhook():
-    """Webhook para receber notificações do Docker Hub"""
+    """Webhook to receive Docker Hub notifications"""
     try:
         data = request.get_json()

-        # Verificar se é uma notificação de push
+        # Check if it's a push notification
         if data.get('push_data', {}).get('tag') == 'latest':
-            logger.info(f"Recebida notificação de push para {REGISTRY}/{IMAGE_NAME}:latest")
+            logger.info(f"Received push notification for {REGISTRY}/{IMAGE_NAME}:latest")

-            # Executar deploy automático
+            # Execute automatic deployment
             result = run_auto_deploy('latest')

             return jsonify({
                 'status': 'success',
-                'message': 'Deploy automático iniciado',
+                'message': 'Automatic deployment started',
                 'result': result
             }), 200
         else:
-            logger.info(f"Push ignorado - tag: {data.get('push_data', {}).get('tag')}")
+            logger.info(f"Push ignored - tag: {data.get('push_data', {}).get('tag')}")
-            return jsonify({'status': 'ignored', 'message': 'Tag não é latest'}), 200
+            return jsonify({'status': 'ignored', 'message': 'Tag is not latest'}), 200

     except Exception as e:
-        logger.error(f"Erro no webhook: {e}")
+        logger.error(f"Webhook error: {e}")
         return jsonify({'status': 'error', 'message': str(e)}), 500

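Aside, not part of the commit: dockerhub_webhook only triggers a deployment when push_data.tag equals "latest", so a minimal hand-rolled payload is enough to exercise it. The localhost:8080 address is an assumption based on the default PORT defined later in this file.

curl -s -X POST http://localhost:8080/webhook/dockerhub \
  -H 'Content-Type: application/json' \
  -d '{"push_data": {"tag": "latest"}}'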
 @app.route('/webhook/github', methods=['POST'])
 def github_webhook():
-    """Webhook para receber notificações do GitHub"""
+    """Webhook to receive GitHub notifications"""
     try:
-        # Verificar se é um push para main
+        # Check if it's a push to main
         if request.headers.get('X-GitHub-Event') == 'push':
             data = request.get_json()

             if data.get('ref') == 'refs/heads/main':
-                logger.info("Recebida notificação de push para main branch")
+                logger.info("Received push notification for main branch")

-                # Executar deploy automático
+                # Execute automatic deployment
                 result = run_auto_deploy('latest')

                 return jsonify({
                     'status': 'success',
-                    'message': 'Deploy automático iniciado',
+                    'message': 'Automatic deployment started',
                     'result': result
                 }), 200
             else:
-                logger.info(f"Push ignorado - branch: {data.get('ref')}")
+                logger.info(f"Push ignored - branch: {data.get('ref')}")
-                return jsonify({'status': 'ignored', 'message': 'Branch não é main'}), 200
+                return jsonify({'status': 'ignored', 'message': 'Branch is not main'}), 200
         else:
-            logger.info(f"Evento ignorado: {request.headers.get('X-GitHub-Event')}")
+            logger.info(f"Event ignored: {request.headers.get('X-GitHub-Event')}")
-            return jsonify({'status': 'ignored', 'message': 'Evento não é push'}), 200
+            return jsonify({'status': 'ignored', 'message': 'Event is not push'}), 200

     except Exception as e:
-        logger.error(f"Erro no webhook: {e}")
+        logger.error(f"Webhook error: {e}")
         return jsonify({'status': 'error', 'message': str(e)}), 500

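Aside, not part of the commit: github_webhook requires the X-GitHub-Event: push header and a ref of refs/heads/main; anything else is ignored. A hedged local test, again assuming the service listens on localhost:8080:

curl -s -X POST http://localhost:8080/webhook/github \
  -H 'Content-Type: application/json' \
  -H 'X-GitHub-Event: push' \
  -d '{"ref": "refs/heads/main"}'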
 @app.route('/deploy/<tag>', methods=['POST'])
 def manual_deploy(tag):
-    """Deploy manual com tag específica"""
+    """Manual deployment with specific tag"""
     try:
-        logger.info(f"Deploy manual solicitado para tag: {tag}")
+        logger.info(f"Manual deployment requested for tag: {tag}")

         result = run_auto_deploy(tag)

         return jsonify({
             'status': 'success',
-            'message': f'Deploy manual iniciado para tag: {tag}',
+            'message': f'Manual deployment started for tag: {tag}',
             'result': result
         }), 200

     except Exception as e:
-        logger.error(f"Erro no deploy manual: {e}")
+        logger.error(f"Manual deployment error: {e}")
         return jsonify({'status': 'error', 'message': str(e)}), 500

 def run_auto_deploy(tag):
-    """Executar script de deploy automático"""
+    """Execute automatic deployment script"""
     try:
-        logger.info(f"Executando deploy automático para tag: {tag}")
+        logger.info(f"Executing automatic deployment for tag: {tag}")

-        # Executar script de deploy
+        # Execute deployment script
         result = subprocess.run(
             [SCRIPT_PATH, tag],
             capture_output=True,
             text=True,
-            timeout=600 # 10 minutos timeout
+            timeout=600 # 10 minutes timeout
         )

         if result.returncode == 0:
-            logger.info("Deploy automático concluído com sucesso")
+            logger.info("Automatic deployment completed successfully")
             return {
                 'success': True,
                 'stdout': result.stdout,
                 'stderr': result.stderr
             }
         else:
-            logger.error(f"Deploy automático falhou: {result.stderr}")
+            logger.error(f"Automatic deployment failed: {result.stderr}")
             return {
                 'success': False,
                 'stdout': result.stdout,
@@ -126,13 +126,13 @@ def run_auto_deploy(tag):
             }

     except subprocess.TimeoutExpired:
-        logger.error("Deploy automático timeout")
+        logger.error("Automatic deployment timeout")
         return {
             'success': False,
             'error': 'Timeout'
         }
     except Exception as e:
-        logger.error(f"Erro ao executar deploy automático: {e}")
+        logger.error(f"Error executing automatic deployment: {e}")
         return {
             'success': False,
             'error': str(e)
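Aside, not part of the commit: run_auto_deploy amounts to running the deploy script with the tag as its single argument under a 10-minute cap. Roughly the same thing from a shell, using the default AUTO_DEPLOY_SCRIPT path declared above:

timeout 600 ./scripts/auto-deploy.sh latest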
@@ -150,9 +150,9 @@ def health():

 @app.route('/status', methods=['GET'])
 def status():
-    """Status do serviço"""
+    """Service status"""
     try:
-        # Verificar se está logado no OpenShift
+        # Check if logged into OpenShift
         result = subprocess.run(['oc', 'whoami'], capture_output=True, text=True)

         return jsonify({
@@ -174,7 +174,7 @@ if __name__ == '__main__':
     port = int(os.getenv('PORT', 8080))
     debug = os.getenv('DEBUG', 'false').lower() == 'true'

-    logger.info(f"Iniciando webhook server na porta {port}")
+    logger.info(f"Starting webhook server on port {port}")
-    logger.info(f"Configurações: IMAGE_NAME={IMAGE_NAME}, REGISTRY={REGISTRY}, NAMESPACE={NAMESPACE}")
+    logger.info(f"Configuration: IMAGE_NAME={IMAGE_NAME}, REGISTRY={REGISTRY}, NAMESPACE={NAMESPACE}")

     app.run(host='0.0.0.0', port=port, debug=debug)
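Aside, not part of the commit: a sketch of running this webhook service locally and hitting the remaining endpoints. The file name webhook.py is an assumption (the diff does not show it); the environment variables, port default, and routes are the ones defined above.

# Assumed file name; adjust to the actual script in the repo.
PORT=8080 REGISTRY=andersonid NAMESPACE=resource-governance \
  AUTO_DEPLOY_SCRIPT=./scripts/auto-deploy.sh python3 webhook.py

# Service status (runs 'oc whoami' internally) and a manual deploy of an example tag:
curl -s http://localhost:8080/status
curl -s -X POST http://localhost:8080/deploy/v1.0.0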
setup.sh (64 lines changed)
@@ -1,67 +1,67 @@
 #!/bin/bash

-# Script de setup para OpenShift Resource Governance Tool
+# Setup script for OpenShift Resource Governance Tool
 set -e

-# Cores para output
+# Colors for output
 RED='\033[0;31m'
 GREEN='\033[0;32m'
 YELLOW='\033[1;33m'
 BLUE='\033[0;34m'
 NC='\033[0m' # No Color

-echo -e "${BLUE}🚀 Setting up OpenShift Resource Governance Tool${NC}"
+echo -e "${BLUE}Setting up OpenShift Resource Governance Tool${NC}"

-# Verificar se Python está instalado
+# Check if Python is installed
 if ! command -v python3 &> /dev/null; then
-    echo -e "${RED}❌ Python 3 não está instalado.${NC}"
+    echo -e "${RED}ERROR: Python 3 is not installed.${NC}"
-    echo -e "${YELLOW}Instale Python 3.11+ e tente novamente.${NC}"
+    echo -e "${YELLOW}Install Python 3.11+ and try again.${NC}"
     exit 1
 fi

-# Verificar se pip está instalado
+# Check if pip is installed
 if ! command -v pip3 &> /dev/null; then
-    echo -e "${RED}❌ pip3 não está instalado.${NC}"
+    echo -e "${RED}ERROR: pip3 is not installed.${NC}"
-    echo -e "${YELLOW}Instale pip3 e tente novamente.${NC}"
+    echo -e "${YELLOW}Install pip3 and try again.${NC}"
     exit 1
 fi

-# Instalar dependências Python
+# Install Python dependencies
-echo -e "${YELLOW}📦 Installing Python dependencies...${NC}"
+echo -e "${YELLOW}Installing Python dependencies...${NC}"
 pip3 install -r requirements.txt

-# Tornar scripts executáveis
+# Make scripts executable
-echo -e "${YELLOW}🔧 Making scripts executable...${NC}"
+echo -e "${YELLOW}Making scripts executable...${NC}"
 chmod +x scripts/*.sh

-# Criar diretório de relatórios
+# Create reports directory
-echo -e "${YELLOW}📁 Creating reports directory...${NC}"
+echo -e "${YELLOW}Creating reports directory...${NC}"
 mkdir -p reports

-# Verificar se Docker está instalado
+# Check if Docker is installed
 if command -v docker &> /dev/null; then
-    echo -e "${GREEN}✅ Docker encontrado${NC}"
+    echo -e "${GREEN}SUCCESS: Docker found${NC}"
 else
-    echo -e "${YELLOW}⚠️ Docker não encontrado. Instale para fazer build da imagem.${NC}"
+    echo -e "${YELLOW}WARNING: Docker not found. Install to build image.${NC}"
 fi

-# Verificar se oc está instalado
+# Check if oc is installed
 if command -v oc &> /dev/null; then
-    echo -e "${GREEN}✅ OpenShift CLI (oc) encontrado${NC}"
+    echo -e "${GREEN}SUCCESS: OpenShift CLI (oc) found${NC}"
 else
-    echo -e "${YELLOW}⚠️ OpenShift CLI (oc) não encontrado. Instale para fazer deploy.${NC}"
+    echo -e "${YELLOW}WARNING: OpenShift CLI (oc) not found. Install to deploy.${NC}"
 fi

-echo -e "${GREEN}🎉 Setup completed successfully!${NC}"
+echo -e "${GREEN}SUCCESS: Setup completed successfully!${NC}"
 echo ""
-echo -e "${BLUE}Próximos passos:${NC}"
+echo -e "${BLUE}Next steps:${NC}"
-echo -e "1. ${YELLOW}Desenvolvimento local:${NC} make dev"
+echo -e "1. ${YELLOW}Local development:${NC} make dev"
-echo -e "2. ${YELLOW}Build da imagem:${NC} make build"
+echo -e "2. ${YELLOW}Build image:${NC} make build"
-echo -e "3. ${YELLOW}Deploy no OpenShift:${NC} make deploy"
+echo -e "3. ${YELLOW}Deploy to OpenShift:${NC} make deploy"
-echo -e "4. ${YELLOW}Ver documentação:${NC} cat README.md"
+echo -e "4. ${YELLOW}View documentation:${NC} cat README.md"
 echo ""
-echo -e "${BLUE}Comandos úteis:${NC}"
+echo -e "${BLUE}Useful commands:${NC}"
-echo -e " make help - Mostrar todos os comandos"
+echo -e " make help - Show all commands"
-echo -e " make test - Executar testes"
+echo -e " make test - Run tests"
-echo -e " make logs - Ver logs da aplicação"
+echo -e " make logs - View application logs"
-echo -e " make status - Ver status da aplicação"
+echo -e " make status - View application status"
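Aside, not part of the commit: the expected flow after setup, taken from the script's own "Next steps" output; only running ./setup.sh from the repository root is an assumption.

./setup.sh     # install dependencies, make scripts executable, create reports/
make dev       # local development
make build     # build the image
make deploy    # deploy to OpenShift
make help      # list all commands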