feat: implement Phase 2 - Sequential Pipeline with Celery Workers
@@ -1982,34 +1982,79 @@
         // Show fullscreen loading modal
         showFullscreenLoading(
             'Analyzing Cluster Resources',
-            'Please wait while we analyze your cluster resources and generate insights... This may take up to 60 seconds for large clusters.'
+            'Starting background analysis pipeline... This may take up to 2 minutes for large clusters.'
         );
 
         // Start smart loading system
         startSmartLoading();
 
-        // Step 1: Load cluster status
-        updateSmartProgress(0, 'Connecting to OpenShift API...');
+        // Step 1: Start background cluster analysis
+        updateSmartProgress(0, 'Starting background cluster analysis...');
 
-        const clusterResponse = await fetch('/api/v1/cluster/status');
-        if (!clusterResponse.ok) {
-            throw new Error(`HTTP error! status: ${clusterResponse.status}`);
+        const analysisResponse = await fetch('/api/v1/tasks/cluster/analyze', {
+            method: 'POST',
+            headers: {
+                'Content-Type': 'application/json'
+            }
+        });
+
+        if (!analysisResponse.ok) {
+            throw new Error(`HTTP error! status: ${analysisResponse.status}`);
         }
 
-        const clusterData = await clusterResponse.json();
-        updateSmartProgress(1, 'Cluster data loaded successfully');
+        const analysisData = await analysisResponse.json();
+        const taskId = analysisData.task_id;
 
-        // Update metrics cards
-        updateMetricsCards(clusterData);
+        updateSmartProgress(1, 'Background analysis started, monitoring progress...');
 
-        // Step 2: Load dashboard charts
-        updateSmartProgress(2, 'Loading dashboard charts...');
-        await loadDashboardCharts();
+        // Step 2: Monitor task progress
+        let taskCompleted = false;
+        let attempts = 0;
+        const maxAttempts = 120; // 2 minutes max
 
-        // Step 3: Complete
-        updateSmartProgress(3, 'Analysis complete');
+        while (!taskCompleted && attempts < maxAttempts) {
+            await new Promise(resolve => setTimeout(resolve, 1000)); // Wait 1 second
+            attempts++;
 
-        currentData = { cluster: clusterData };
+            try {
+                const statusResponse = await fetch(`/api/v1/tasks/${taskId}/status`);
+                if (!statusResponse.ok) {
+                    throw new Error(`HTTP error! status: ${statusResponse.status}`);
+                }
+
+                const statusData = await statusResponse.json();
+
+                if (statusData.state === 'PROGRESS') {
+                    const progress = Math.round((statusData.current / statusData.total) * 100);
+                    updateSmartProgress(progress, statusData.status);
+                } else if (statusData.state === 'SUCCESS') {
+                    updateSmartProgress(100, 'Analysis completed successfully!');
+
+                    // Update metrics cards with results
+                    updateMetricsCards(statusData.result);
+
+                    // Load dashboard charts
+                    updateSmartProgress(100, 'Loading dashboard charts...');
+                    await loadDashboardCharts();
+
+                    currentData = { cluster: statusData.result };
+                    taskCompleted = true;
+
+                } else if (statusData.state === 'FAILURE') {
+                    throw new Error(`Analysis failed: ${statusData.error}`);
+                }
+
+            } catch (error) {
+                console.warn('Error checking task status:', error);
+                if (attempts >= maxAttempts) {
+                    throw new Error('Analysis timeout - please try again');
+                }
+            }
+        }
+
+        if (!taskCompleted) {
+            throw new Error('Analysis timeout - please try again');
+        }
 
         // Stop smart loading and hide modal
         stopSmartLoading();
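The hunk above replaces the single blocking GET to /api/v1/cluster/status with a POST that enqueues a background analysis task and a one-second polling loop against /api/v1/tasks/{task_id}/status, capped at 120 attempts (about two minutes, matching the modal text). The backend side of that contract is not part of this hunk; the sketch below is a minimal, assumed shape of it (FastAPI router plus a Celery task reporting progress via update_state; every module, function, and result-field value here is illustrative, not the repository's actual code), showing only the fields the dashboard reads: task_id, state, current, total, status, result, and error.

# Hypothetical backend counterpart to the polling loop above; route paths match the
# hunk, but module and function names are assumptions for illustration only.
import os

from celery import Celery
from fastapi import APIRouter, FastAPI

celery_app = Celery(
    "resource_governance",
    broker=os.environ.get("CELERY_BROKER_URL", "redis://redis:6379/0"),
    backend=os.environ.get("CELERY_RESULT_BACKEND", "redis://redis:6379/0"),
)

router = APIRouter(prefix="/api/v1/tasks")


@celery_app.task(bind=True)
def analyze_cluster(self):
    """Sequential pipeline stub: each stage publishes PROGRESS metadata the UI reads."""
    stages = ["Collecting workloads", "Querying metrics", "Scoring namespaces", "Building report"]
    for i, stage in enumerate(stages):
        self.update_state(state="PROGRESS", meta={"current": i, "total": len(stages), "status": stage})
        # ... real analysis work for this stage would run here ...
    return {"total_pods": 0, "namespaces_analyzed": 0}  # placeholder shape for updateMetricsCards()


@router.post("/cluster/analyze")
async def start_cluster_analysis():
    # Enqueue the task and hand the id back to the dashboard for polling.
    task = analyze_cluster.delay()
    return {"task_id": task.id}


@router.get("/{task_id}/status")
async def task_status(task_id: str):
    # Translate Celery task state into the JSON the polling loop expects.
    result = celery_app.AsyncResult(task_id)
    if result.state == "PROGRESS":
        return {"state": "PROGRESS", **(result.info or {})}
    if result.state == "SUCCESS":
        return {"state": "SUCCESS", "result": result.result}
    if result.state == "FAILURE":
        return {"state": "FAILURE", "error": str(result.info)}
    return {"state": result.state}


app = FastAPI()
app.include_router(router)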
k8s/celery-worker-deployment.yaml (new file, 120 lines)
@@ -0,0 +1,120 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: celery-worker
+  namespace: resource-governance
+  labels:
+    app.kubernetes.io/name: celery-worker
+    app.kubernetes.io/component: worker
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: celery-worker
+      app.kubernetes.io/component: worker
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: celery-worker
+        app.kubernetes.io/component: worker
+    spec:
+      serviceAccountName: resource-governance-sa
+      securityContext:
+        runAsNonRoot: true
+      containers:
+        - name: celery-worker
+          image: quay.io/rh_ee_anobre/resource-governance:latest
+          imagePullPolicy: Always
+          command: ["python", "app/workers/celery_worker.py"]
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - ALL
+            seccompProfile:
+              type: RuntimeDefault
+          env:
+            - name: KUBECONFIG
+              value: "/var/run/secrets/kubernetes.io/serviceaccount/token"
+            - name: REDIS_URL
+              valueFrom:
+                configMapKeyRef:
+                  name: redis-config
+                  key: REDIS_URL
+            - name: CELERY_BROKER_URL
+              valueFrom:
+                configMapKeyRef:
+                  name: redis-config
+                  key: CELERY_BROKER_URL
+            - name: CELERY_RESULT_BACKEND
+              valueFrom:
+                configMapKeyRef:
+                  name: redis-config
+                  key: CELERY_RESULT_BACKEND
+            - name: CPU_LIMIT_RATIO
+              valueFrom:
+                configMapKeyRef:
+                  name: resource-governance-config
+                  key: CPU_LIMIT_RATIO
+            - name: MEMORY_LIMIT_RATIO
+              valueFrom:
+                configMapKeyRef:
+                  name: resource-governance-config
+                  key: MEMORY_LIMIT_RATIO
+            - name: MIN_CPU_REQUEST
+              valueFrom:
+                configMapKeyRef:
+                  name: resource-governance-config
+                  key: MIN_CPU_REQUEST
+            - name: MIN_MEMORY_REQUEST
+              valueFrom:
+                configMapKeyRef:
+                  name: resource-governance-config
+                  key: MIN_MEMORY_REQUEST
+            - name: CRITICAL_NAMESPACES
+              valueFrom:
+                configMapKeyRef:
+                  name: resource-governance-config
+                  key: CRITICAL_NAMESPACES
+            - name: INCLUDE_SYSTEM_NAMESPACES
+              valueFrom:
+                configMapKeyRef:
+                  name: resource-governance-config
+                  key: INCLUDE_SYSTEM_NAMESPACES
+            - name: SYSTEM_NAMESPACE_PREFIXES
+              valueFrom:
+                configMapKeyRef:
+                  name: resource-governance-config
+                  key: SYSTEM_NAMESPACE_PREFIXES
+            - name: PROMETHEUS_URL
+              valueFrom:
+                configMapKeyRef:
+                  name: resource-governance-config
+                  key: PROMETHEUS_URL
+            - name: REPORT_EXPORT_PATH
+              valueFrom:
+                configMapKeyRef:
+                  name: resource-governance-config
+                  key: REPORT_EXPORT_PATH
+            - name: SERVICE_ACCOUNT_NAME
+              valueFrom:
+                configMapKeyRef:
+                  name: resource-governance-config
+                  key: SERVICE_ACCOUNT_NAME
+          resources:
+            requests:
+              cpu: 100m
+              memory: 128Mi
+            limits:
+              cpu: 500m
+              memory: 512Mi
+          volumeMounts:
+            - name: service-account-token
+              mountPath: /var/run/secrets/kubernetes.io/serviceaccount
+              readOnly: true
+      volumes:
+        - name: service-account-token
+          secret:
+            secretName: resource-governance-sa-token
+            optional: false
+      restartPolicy: Always
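The worker container runs app/workers/celery_worker.py directly rather than invoking the celery CLI. That module is not included in this diff; below is a minimal sketch of what such an entrypoint might look like, assuming it builds the Celery app from the CELERY_BROKER_URL and CELERY_RESULT_BACKEND values injected above (the app name and the included task package are placeholders, not the repository's actual identifiers).

# Hypothetical app/workers/celery_worker.py entrypoint matching the container command above.
import os

from celery import Celery

celery_app = Celery(
    "resource_governance",
    broker=os.environ.get("CELERY_BROKER_URL", "redis://redis:6379/0"),
    backend=os.environ.get("CELERY_RESULT_BACKEND", "redis://redis:6379/0"),
    include=["app.workers.tasks"],  # assumed package holding the analysis tasks
)

if __name__ == "__main__":
    # Roughly equivalent to: celery -A app.workers.celery_worker worker --loglevel=info --concurrency=2
    celery_app.worker_main(["worker", "--loglevel=info", "--concurrency=2"])

With replicas: 2, two such workers consume the same Redis-backed queue, so one pod restarting does not stall the analysis pipeline.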
@@ -8,6 +8,7 @@ resources:
 - redis-configmap.yaml
 - redis-deployment.yaml
 - deployment.yaml
+- celery-worker-deployment.yaml
 - service.yaml
 - route.yaml
@@ -57,6 +57,10 @@ fi
 echo -e "${YELLOW}Applying Deployment...${NC}"
 oc apply -f k8s/deployment.yaml
 
+# Apply Celery Worker Deployment
+echo -e "${YELLOW}Applying Celery Worker Deployment...${NC}"
+oc apply -f k8s/celery-worker-deployment.yaml
+
 # Apply Service
 echo -e "${YELLOW}Applying Service...${NC}"
 oc apply -f k8s/service.yaml