diff --git a/.github/workflows/openshift-deploy.yml b/.github/workflows/openshift-deploy.yml index 6aeee7a..664ae30 100644 --- a/.github/workflows/openshift-deploy.yml +++ b/.github/workflows/openshift-deploy.yml @@ -64,25 +64,32 @@ jobs: # Login to OpenShift echo "${{ secrets.OPENSHIFT_TOKEN }}" | oc login ${{ secrets.OPENSHIFT_SERVER }} --token-stdin - # Update image in DaemonSet - oc set image daemonset/${{ env.IMAGE_NAME }} ${{ env.IMAGE_NAME }}=${{ steps.meta.outputs.tags }} -n ${{ env.NAMESPACE }} || true - - # Apply manifests + # Apply manifests (namespace, rbac, configmap) oc apply -f k8s/namespace.yaml oc apply -f k8s/rbac.yaml oc apply -f k8s/configmap.yaml - oc apply -f k8s/daemonset.yaml + + # Update deployment with new image + oc set image deployment/${{ env.IMAGE_NAME }} ${{ env.IMAGE_NAME }}=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }} -n ${{ env.NAMESPACE }} || true + + # Apply deployment, service and route + oc apply -f k8s/deployment.yaml oc apply -f k8s/service.yaml oc apply -f k8s/route.yaml # Wait for rollout - oc rollout status daemonset/${{ env.IMAGE_NAME }} -n ${{ env.NAMESPACE }} --timeout=300s + oc rollout status deployment/${{ env.IMAGE_NAME }} -n ${{ env.NAMESPACE }} --timeout=300s + + # Verify deployment + oc get deployment ${{ env.IMAGE_NAME }} -n ${{ env.NAMESPACE }} + oc get pods -n ${{ env.NAMESPACE }} -l app.kubernetes.io/name=${{ env.IMAGE_NAME }} # Get route URL ROUTE_URL=$(oc get route ${{ env.IMAGE_NAME }}-route -n ${{ env.NAMESPACE }} -o jsonpath='{.spec.host}' 2>/dev/null || echo "") if [ -n "$ROUTE_URL" ]; then echo "🚀 Application deployed successfully!" echo "🌐 URL: https://$ROUTE_URL" + echo "📊 Status: oc get pods -n ${{ env.NAMESPACE }} -l app.kubernetes.io/name=${{ env.IMAGE_NAME }}" fi env: OPENSHIFT_SERVER: ${{ secrets.OPENSHIFT_SERVER }} diff --git a/README.md b/README.md index 2a18f8a..4dda325 100644 --- a/README.md +++ b/README.md @@ -38,9 +38,47 @@ Uma ferramenta de governança de recursos para clusters OpenShift que vai além ### 2. Deploy no OpenShift -#### Deploy Automático (Recomendado) +#### 🚀 CI/CD Automático (Recomendado para Produção) ```bash -# Deploy completo com ImagePullSecret +# 1. Configurar secrets do GitHub +./scripts/setup-github-secrets.sh + +# 2. Fazer commit e push +git add . +git commit -m "Nova funcionalidade" +git push origin main + +# 3. GitHub Actions fará deploy automático! 
+``` + +**Fluxo Automático:** +- ✅ **Push para main** → GitHub Actions detecta mudança +- ✅ **Build automático** → Nova imagem no Docker Hub +- ✅ **Deploy automático** → OpenShift atualiza deployment +- ✅ **Rolling Update** → Zero downtime +- ✅ **Health Checks** → Validação automática + +#### 🔧 Deploy Manual (Desenvolvimento) +```bash +# Deploy com estratégia Blue-Green +./scripts/blue-green-deploy.sh + +# Deploy com tag específica +./scripts/blue-green-deploy.sh v1.2.0 + +# Testar fluxo CI/CD localmente +./scripts/test-ci-cd.sh +``` + +**Scripts para Desenvolvimento:** +- ✅ **Controle total** sobre o processo +- ✅ **Iteração rápida** durante desenvolvimento +- ✅ **Debugging** mais fácil +- ✅ **Testes locais** antes de fazer push + +#### Deploy Completo (Inicial) +```bash +# Deploy completo com ImagePullSecret (primeira vez) ./scripts/deploy-complete.sh ``` diff --git a/app/api/routes.py b/app/api/routes.py index 6534ce8..912b093 100644 --- a/app/api/routes.py +++ b/app/api/routes.py @@ -3,6 +3,7 @@ Rotas da API """ import logging from typing import List, Optional +from datetime import datetime from fastapi import APIRouter, HTTPException, Depends, Request from fastapi.responses import FileResponse @@ -12,6 +13,7 @@ from app.models.resource_models import ( ) from app.services.validation_service import ValidationService from app.services.report_service import ReportService +from app.services.historical_analysis import HistoricalAnalysisService logger = logging.getLogger(__name__) @@ -365,6 +367,61 @@ async def apply_recommendation( logger.error(f"Erro ao aplicar recomendação: {e}") raise HTTPException(status_code=500, detail=str(e)) +@api_router.get("/validations/historical") +async def get_historical_validations( + namespace: Optional[str] = None, + time_range: str = "24h", + k8s_client=Depends(get_k8s_client) +): + """Obter validações com análise histórica do Prometheus""" + try: + validation_service = ValidationService() + + # Coletar pods + if namespace: + namespace_resources = await k8s_client.get_namespace_resources(namespace) + pods = namespace_resources.pods + else: + pods = await k8s_client.get_all_pods() + + # Validar com análise histórica + all_validations = [] + for pod in pods: + pod_validations = await validation_service.validate_pod_resources_with_historical_analysis( + pod, time_range + ) + all_validations.extend(pod_validations) + + return { + "validations": all_validations, + "total": len(all_validations), + "time_range": time_range, + "namespace": namespace or "all" + } + + except Exception as e: + logger.error(f"Erro ao obter validações históricas: {e}") + raise HTTPException(status_code=500, detail=str(e)) + +@api_router.get("/cluster/historical-summary") +async def get_cluster_historical_summary( + time_range: str = "24h" +): + """Obter resumo histórico do cluster""" + try: + historical_service = HistoricalAnalysisService() + summary = await historical_service.get_cluster_historical_summary(time_range) + + return { + "summary": summary, + "time_range": time_range, + "timestamp": datetime.now().isoformat() + } + + except Exception as e: + logger.error(f"Erro ao obter resumo histórico: {e}") + raise HTTPException(status_code=500, detail=str(e)) + @api_router.get("/health") async def health_check(): """Health check da API""" diff --git a/app/services/historical_analysis.py b/app/services/historical_analysis.py new file mode 100644 index 0000000..a7decad --- /dev/null +++ b/app/services/historical_analysis.py @@ -0,0 +1,445 @@ +""" +Serviço de análise histórica usando 
métricas do Prometheus +""" +import logging +import asyncio +from typing import List, Dict, Any, Optional, Tuple +from datetime import datetime, timedelta +import aiohttp +import json + +from app.models.resource_models import PodResource, ResourceValidation +from app.core.config import settings + +logger = logging.getLogger(__name__) + +class HistoricalAnalysisService: + """Serviço para análise histórica de recursos usando Prometheus""" + + def __init__(self): + self.prometheus_url = settings.prometheus_url + self.time_ranges = { + '1h': 3600, # 1 hora + '6h': 21600, # 6 horas + '24h': 86400, # 24 horas + '7d': 604800, # 7 dias + '30d': 2592000 # 30 dias + } + + async def analyze_pod_historical_usage( + self, + pod: PodResource, + time_range: str = '24h' + ) -> List[ResourceValidation]: + """Analisar uso histórico de um pod""" + validations = [] + + if time_range not in self.time_ranges: + time_range = '24h' + + end_time = datetime.now() + start_time = end_time - timedelta(seconds=self.time_ranges[time_range]) + + try: + # Analisar CPU + cpu_analysis = await self._analyze_cpu_usage( + pod, start_time, end_time, time_range + ) + validations.extend(cpu_analysis) + + # Analisar memória + memory_analysis = await self._analyze_memory_usage( + pod, start_time, end_time, time_range + ) + validations.extend(memory_analysis) + + except Exception as e: + logger.error(f"Erro na análise histórica do pod {pod.name}: {e}") + validations.append(ResourceValidation( + pod_name=pod.name, + namespace=pod.namespace, + container_name="all", + validation_type="historical_analysis_error", + severity="warning", + message=f"Erro na análise histórica: {str(e)}", + recommendation="Verificar conectividade com Prometheus" + )) + + return validations + + async def _analyze_cpu_usage( + self, + pod: PodResource, + start_time: datetime, + end_time: datetime, + time_range: str + ) -> List[ResourceValidation]: + """Analisar uso histórico de CPU""" + validations = [] + + for container in pod.containers: + container_name = container["name"] + + try: + # Query para CPU usage rate + cpu_query = f''' + rate(container_cpu_usage_seconds_total{{ + pod="{pod.name}", + namespace="{pod.namespace}", + container="{container_name}", + container!="POD", + container!="" + }}[{time_range}]) + ''' + + # Query para CPU requests + cpu_requests_query = f''' + kube_pod_container_resource_requests{{ + pod="{pod.name}", + namespace="{pod.namespace}", + resource="cpu" + }} + ''' + + # Query para CPU limits + cpu_limits_query = f''' + kube_pod_container_resource_limits{{ + pod="{pod.name}", + namespace="{pod.namespace}", + resource="cpu" + }} + ''' + + # Executar queries + cpu_usage = await self._query_prometheus(cpu_query, start_time, end_time) + cpu_requests = await self._query_prometheus(cpu_requests_query, start_time, end_time) + cpu_limits = await self._query_prometheus(cpu_limits_query, start_time, end_time) + + if cpu_usage and cpu_requests: + analysis = self._analyze_cpu_metrics( + pod.name, pod.namespace, container_name, + cpu_usage, cpu_requests, cpu_limits, time_range + ) + validations.extend(analysis) + + except Exception as e: + logger.warning(f"Erro ao analisar CPU do container {container_name}: {e}") + + return validations + + async def _analyze_memory_usage( + self, + pod: PodResource, + start_time: datetime, + end_time: datetime, + time_range: str + ) -> List[ResourceValidation]: + """Analisar uso histórico de memória""" + validations = [] + + for container in pod.containers: + container_name = container["name"] + + try: + # Query 
para memória usage + memory_query = f''' + container_memory_working_set_bytes{{ + pod="{pod.name}", + namespace="{pod.namespace}", + container="{container_name}", + container!="POD", + container!="" + }} + ''' + + # Query para memória requests + memory_requests_query = f''' + kube_pod_container_resource_requests{{ + pod="{pod.name}", + namespace="{pod.namespace}", + resource="memory" + }} + ''' + + # Query para memória limits + memory_limits_query = f''' + kube_pod_container_resource_limits{{ + pod="{pod.name}", + namespace="{pod.namespace}", + resource="memory" + }} + ''' + + # Executar queries + memory_usage = await self._query_prometheus(memory_query, start_time, end_time) + memory_requests = await self._query_prometheus(memory_requests_query, start_time, end_time) + memory_limits = await self._query_prometheus(memory_limits_query, start_time, end_time) + + if memory_usage and memory_requests: + analysis = self._analyze_memory_metrics( + pod.name, pod.namespace, container_name, + memory_usage, memory_requests, memory_limits, time_range + ) + validations.extend(analysis) + + except Exception as e: + logger.warning(f"Erro ao analisar memória do container {container_name}: {e}") + + return validations + + def _analyze_cpu_metrics( + self, + pod_name: str, + namespace: str, + container_name: str, + usage_data: List[Dict], + requests_data: List[Dict], + limits_data: List[Dict], + time_range: str + ) -> List[ResourceValidation]: + """Analisar métricas de CPU""" + validations = [] + + if not usage_data or not requests_data: + return validations + + # Calcular estatísticas de uso + usage_values = [float(point[1]) for point in usage_data if point[1] != 'NaN'] + if not usage_values: + return validations + + # Valores atuais de requests/limits + current_requests = float(requests_data[0][1]) if requests_data else 0 + current_limits = float(limits_data[0][1]) if limits_data else 0 + + # Estatísticas de uso + avg_usage = sum(usage_values) / len(usage_values) + max_usage = max(usage_values) + p95_usage = sorted(usage_values)[int(len(usage_values) * 0.95)] + p99_usage = sorted(usage_values)[int(len(usage_values) * 0.99)] + + # Análise de adequação dos requests + if current_requests > 0: + # Request muito alto (uso médio < 50% do request) + if avg_usage < current_requests * 0.5: + validations.append(ResourceValidation( + pod_name=pod_name, + namespace=namespace, + container_name=container_name, + validation_type="historical_analysis", + severity="warning", + message=f"CPU request muito alto: uso médio {avg_usage:.3f} cores vs request {current_requests:.3f} cores", + recommendation=f"Considerar reduzir CPU request para ~{avg_usage * 1.2:.3f} cores (baseado em {time_range} de uso)" + )) + + # Request muito baixo (uso P95 > 80% do request) + elif p95_usage > current_requests * 0.8: + validations.append(ResourceValidation( + pod_name=pod_name, + namespace=namespace, + container_name=container_name, + validation_type="historical_analysis", + severity="warning", + message=f"CPU request pode ser insuficiente: P95 {p95_usage:.3f} cores vs request {current_requests:.3f} cores", + recommendation=f"Considerar aumentar CPU request para ~{p95_usage * 1.2:.3f} cores (baseado em {time_range} de uso)" + )) + + # Análise de adequação dos limits + if current_limits > 0: + # Limit muito alto (uso P99 < 50% do limit) + if p99_usage < current_limits * 0.5: + validations.append(ResourceValidation( + pod_name=pod_name, + namespace=namespace, + container_name=container_name, + validation_type="historical_analysis", + 
severity="info", + message=f"CPU limit muito alto: P99 {p99_usage:.3f} cores vs limit {current_limits:.3f} cores", + recommendation=f"Considerar reduzir CPU limit para ~{p99_usage * 1.5:.3f} cores (baseado em {time_range} de uso)" + )) + + # Limit muito baixo (uso máximo > 90% do limit) + elif max_usage > current_limits * 0.9: + validations.append(ResourceValidation( + pod_name=pod_name, + namespace=namespace, + container_name=container_name, + validation_type="historical_analysis", + severity="warning", + message=f"CPU limit pode ser insuficiente: uso máximo {max_usage:.3f} cores vs limit {current_limits:.3f} cores", + recommendation=f"Considerar aumentar CPU limit para ~{max_usage * 1.2:.3f} cores (baseado em {time_range} de uso)" + )) + + return validations + + def _analyze_memory_metrics( + self, + pod_name: str, + namespace: str, + container_name: str, + usage_data: List[Dict], + requests_data: List[Dict], + limits_data: List[Dict], + time_range: str + ) -> List[ResourceValidation]: + """Analisar métricas de memória""" + validations = [] + + if not usage_data or not requests_data: + return validations + + # Calcular estatísticas de uso + usage_values = [float(point[1]) for point in usage_data if point[1] != 'NaN'] + if not usage_values: + return validations + + # Valores atuais de requests/limits (em bytes) + current_requests = float(requests_data[0][1]) if requests_data else 0 + current_limits = float(limits_data[0][1]) if limits_data else 0 + + # Estatísticas de uso + avg_usage = sum(usage_values) / len(usage_values) + max_usage = max(usage_values) + p95_usage = sorted(usage_values)[int(len(usage_values) * 0.95)] + p99_usage = sorted(usage_values)[int(len(usage_values) * 0.99)] + + # Converter para MiB para melhor legibilidade + def bytes_to_mib(bytes_value): + return bytes_value / (1024 * 1024) + + # Análise de adequação dos requests + if current_requests > 0: + # Request muito alto (uso médio < 50% do request) + if avg_usage < current_requests * 0.5: + validations.append(ResourceValidation( + pod_name=pod_name, + namespace=namespace, + container_name=container_name, + validation_type="historical_analysis", + severity="warning", + message=f"Memória request muito alto: uso médio {bytes_to_mib(avg_usage):.1f}Mi vs request {bytes_to_mib(current_requests):.1f}Mi", + recommendation=f"Considerar reduzir memória request para ~{bytes_to_mib(avg_usage * 1.2):.1f}Mi (baseado em {time_range} de uso)" + )) + + # Request muito baixo (uso P95 > 80% do request) + elif p95_usage > current_requests * 0.8: + validations.append(ResourceValidation( + pod_name=pod_name, + namespace=namespace, + container_name=container_name, + validation_type="historical_analysis", + severity="warning", + message=f"Memória request pode ser insuficiente: P95 {bytes_to_mib(p95_usage):.1f}Mi vs request {bytes_to_mib(current_requests):.1f}Mi", + recommendation=f"Considerar aumentar memória request para ~{bytes_to_mib(p95_usage * 1.2):.1f}Mi (baseado em {time_range} de uso)" + )) + + # Análise de adequação dos limits + if current_limits > 0: + # Limit muito alto (uso P99 < 50% do limit) + if p99_usage < current_limits * 0.5: + validations.append(ResourceValidation( + pod_name=pod_name, + namespace=namespace, + container_name=container_name, + validation_type="historical_analysis", + severity="info", + message=f"Memória limit muito alto: P99 {bytes_to_mib(p99_usage):.1f}Mi vs limit {bytes_to_mib(current_limits):.1f}Mi", + recommendation=f"Considerar reduzir memória limit para ~{bytes_to_mib(p99_usage * 1.5):.1f}Mi (baseado 
em {time_range} de uso)" + )) + + # Limit muito baixo (uso máximo > 90% do limit) + elif max_usage > current_limits * 0.9: + validations.append(ResourceValidation( + pod_name=pod_name, + namespace=namespace, + container_name=container_name, + validation_type="historical_analysis", + severity="warning", + message=f"Memória limit pode ser insuficiente: uso máximo {bytes_to_mib(max_usage):.1f}Mi vs limit {bytes_to_mib(current_limits):.1f}Mi", + recommendation=f"Considerar aumentar memória limit para ~{bytes_to_mib(max_usage * 1.2):.1f}Mi (baseado em {time_range} de uso)" + )) + + return validations + + async def _query_prometheus(self, query: str, start_time: datetime, end_time: datetime) -> List[Dict]: + """Executar query no Prometheus""" + try: + async with aiohttp.ClientSession() as session: + params = { + 'query': query, + 'start': start_time.timestamp(), + 'end': end_time.timestamp(), + 'step': '60s' # 1 minuto de resolução + } + + async with session.get( + f"{self.prometheus_url}/api/v1/query_range", + params=params, + timeout=aiohttp.ClientTimeout(total=30) + ) as response: + if response.status == 200: + data = await response.json() + if data['status'] == 'success' and data['data']['result']: + return data['data']['result'][0]['values'] + else: + logger.warning(f"Prometheus query failed: {response.status}") + return [] + except Exception as e: + logger.error(f"Erro ao consultar Prometheus: {e}") + return [] + + async def get_cluster_historical_summary(self, time_range: str = '24h') -> Dict[str, Any]: + """Obter resumo histórico do cluster""" + try: + # Query para CPU total do cluster + cpu_query = f''' + sum(rate(container_cpu_usage_seconds_total{{ + container!="POD", + container!="" + }}[{time_range}])) + ''' + + # Query para memória total do cluster + memory_query = f''' + sum(container_memory_working_set_bytes{{ + container!="POD", + container!="" + }}) + ''' + + # Query para requests totais + cpu_requests_query = f''' + sum(kube_pod_container_resource_requests{{resource="cpu"}}) + ''' + + memory_requests_query = f''' + sum(kube_pod_container_resource_requests{{resource="memory"}}) + ''' + + # Executar queries + cpu_usage = await self._query_prometheus(cpu_query, + datetime.now() - timedelta(seconds=self.time_ranges[time_range]), + datetime.now()) + memory_usage = await self._query_prometheus(memory_query, + datetime.now() - timedelta(seconds=self.time_ranges[time_range]), + datetime.now()) + cpu_requests = await self._query_prometheus(cpu_requests_query, + datetime.now() - timedelta(seconds=self.time_ranges[time_range]), + datetime.now()) + memory_requests = await self._query_prometheus(memory_requests_query, + datetime.now() - timedelta(seconds=self.time_ranges[time_range]), + datetime.now()) + + return { + 'time_range': time_range, + 'cpu_usage': float(cpu_usage[0][1]) if cpu_usage else 0, + 'memory_usage': float(memory_usage[0][1]) if memory_usage else 0, + 'cpu_requests': float(cpu_requests[0][1]) if cpu_requests else 0, + 'memory_requests': float(memory_requests[0][1]) if memory_requests else 0, + 'cpu_utilization': (float(cpu_usage[0][1]) / float(cpu_requests[0][1]) * 100) if cpu_usage and cpu_requests and cpu_requests[0][1] != '0' else 0, + 'memory_utilization': (float(memory_usage[0][1]) / float(memory_requests[0][1]) * 100) if memory_usage and memory_requests and memory_requests[0][1] != '0' else 0 + } + + except Exception as e: + logger.error(f"Erro ao obter resumo histórico: {e}") + return {} diff --git a/app/services/validation_service.py 
b/app/services/validation_service.py index 606b77b..f8452a2 100644 --- a/app/services/validation_service.py +++ b/app/services/validation_service.py @@ -8,6 +8,7 @@ import re from app.models.resource_models import PodResource, ResourceValidation, NamespaceResources from app.core.config import settings +from app.services.historical_analysis import HistoricalAnalysisService logger = logging.getLogger(__name__) @@ -19,6 +20,7 @@ class ValidationService: self.memory_ratio = settings.memory_limit_ratio self.min_cpu_request = settings.min_cpu_request self.min_memory_request = settings.min_memory_request + self.historical_analysis = HistoricalAnalysisService() def validate_pod_resources(self, pod: PodResource) -> List[ResourceValidation]: """Validar recursos de um pod""" @@ -32,6 +34,26 @@ class ValidationService: return validations + async def validate_pod_resources_with_historical_analysis( + self, + pod: PodResource, + time_range: str = '24h' + ) -> List[ResourceValidation]: + """Validar recursos de um pod incluindo análise histórica""" + # Validações estáticas + static_validations = self.validate_pod_resources(pod) + + # Análise histórica + try: + historical_validations = await self.historical_analysis.analyze_pod_historical_usage( + pod, time_range + ) + static_validations.extend(historical_validations) + except Exception as e: + logger.warning(f"Erro na análise histórica do pod {pod.name}: {e}") + + return static_validations + def _validate_container_resources( self, pod_name: str, diff --git a/app/static/index.html b/app/static/index.html index 4b3610a..c56c2d4 100644 --- a/app/static/index.html +++ b/app/static/index.html @@ -133,8 +133,10 @@ .validation-item { padding: 1rem; border-left: 4px solid #ccc; - margin: 0.5rem 0; + margin: 0.75rem 0; background: #f8f9fa; + border-radius: 6px; + border: 1px solid #dee2e6; } .validation-item.error { @@ -231,7 +233,7 @@ border: 1px solid #ddd; border-radius: 8px; margin-bottom: 1rem; - overflow: hidden; + overflow: visible; } .accordion-header { @@ -272,14 +274,27 @@ } .accordion-content { - padding: 0; + padding: 1rem 1.5rem; max-height: 0; overflow: hidden; transition: max-height 0.3s ease; + background: white; + border-top: 1px solid #dee2e6; } .accordion-content.active { - max-height: 1000px; + max-height: none; + overflow: visible; + padding: 1rem 1.5rem; + } + + /* Garantir que o conteúdo não seja cortado */ + .accordion-content .validation-item:last-child { + margin-bottom: 0; + } + + .accordion-content .historical-validation:last-child { + margin-bottom: 0; } .pod-list { @@ -367,6 +382,74 @@ cursor: not-allowed; } + /* Historical Analysis Styles */ + .historical-summary { + background: #f8f9fa; + border: 1px solid #dee2e6; + border-radius: 8px; + padding: 1rem; + margin-bottom: 1rem; + } + + .historical-summary h3 { + margin: 0 0 1rem 0; + color: #495057; + } + + .historical-stats { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); + gap: 1rem; + } + + .historical-stat { + background: white; + padding: 0.75rem; + border-radius: 6px; + border-left: 4px solid #007bff; + } + + .historical-stat h4 { + margin: 0 0 0.5rem 0; + font-size: 0.9rem; + color: #6c757d; + } + + .historical-stat .value { + font-size: 1.5rem; + font-weight: bold; + color: #007bff; + } + + .historical-validation { + background: #fff3cd; + border: 1px solid #ffeaa7; + border-radius: 6px; + padding: 1rem; + margin-bottom: 0.75rem; + } + + .historical-validation.error { + background: #f8d7da; + border-color: #f5c6cb; + } + + 
.historical-validation.warning { + background: #fff3cd; + border-color: #ffeaa7; + } + + .historical-validation.info { + background: #d1ecf1; + border-color: #bee5eb; + } + + .historical-validation.critical { + background: #f8d7da; + border-color: #f5c6cb; + border-left: 4px solid #dc3545; + } + .pagination button.active { background: #cc0000; color: white; @@ -470,6 +553,7 @@
+
@@ -494,6 +578,39 @@ + + +
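
---

For reference, below is a minimal usage sketch (not part of the diff) of the two historical-analysis endpoints added in `app/api/routes.py`. The base URL, the example namespace, and the absence of a router prefix are assumptions — adjust them to the actual OpenShift Route printed by the deploy workflow and to wherever `api_router` is mounted in this app.

```python
"""
Hedged sketch: calling the new historical-analysis endpoints with aiohttp.
BASE_URL and the "minha-app" namespace are placeholders, not values from the repo.
"""
import asyncio

import aiohttp

BASE_URL = "http://localhost:8080"  # hypothetical; use the Route URL reported by the workflow


async def main() -> None:
    async with aiohttp.ClientSession() as session:
        # Validations enriched with Prometheus history for one namespace (last 24h).
        # Omitting "namespace" makes the endpoint scan all pods in the cluster.
        async with session.get(
            f"{BASE_URL}/validations/historical",
            params={"namespace": "minha-app", "time_range": "24h"},
        ) as resp:
            resp.raise_for_status()
            data = await resp.json()
            print(f"{data['total']} validations in {data['namespace']} ({data['time_range']})")

        # Cluster-wide historical summary over the last 7 days.
        async with session.get(
            f"{BASE_URL}/cluster/historical-summary",
            params={"time_range": "7d"},
        ) as resp:
            resp.raise_for_status()
            summary = await resp.json()
            print(summary["summary"])


if __name__ == "__main__":
    asyncio.run(main())
```

Per `HistoricalAnalysisService.time_ranges`, the accepted `time_range` values are `1h`, `6h`, `24h`, `7d` and `30d`; any other value falls back to `24h`.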