Remove che.infra.kubernetes.pvc.jobs.image, che.infra.kubernetes.pvc.jobs.image.pull_policy, che.infra.kubernetes.pvc.jobs.memorylimit properties and some PVC-related classes
Signed-off-by: Andrew Obuchowicz <aobuchow@redhat.com>pull/344/head
parent
d9870829fa
commit
971acbd7a2
|
|
@ -380,16 +380,6 @@ che.infra.kubernetes.ingress_start_timeout_min=5
|
|||
# A failed container startup is handled explicitly by {prod-short} server.
|
||||
che.infra.kubernetes.workspace_unrecoverable_events=FailedMount,FailedScheduling,MountVolume.SetUp failed,Failed to pull image,FailedCreate,ReplicaSetCreateError
|
||||
|
||||
|
||||
# Pod that is launched when performing persistent volume claim maintenance jobs on OpenShift
|
||||
che.infra.kubernetes.pvc.jobs.image=registry.access.redhat.com/ubi8-minimal:8.3-230
|
||||
|
||||
# Image pull policy of container that used for the maintenance jobs on {orch-name} cluster
|
||||
che.infra.kubernetes.pvc.jobs.image.pull_policy=IfNotPresent
|
||||
|
||||
# Defines Pod memory limit for persistent volume claim maintenance jobs
|
||||
che.infra.kubernetes.pvc.jobs.memorylimit=250Mi
|
||||
|
||||
# Defines annotations for ingresses which are used for servers exposing. Value depends on the kind of ingress
|
||||
# controller.
|
||||
#
|
||||
|
|
|
|||
|
|
@ -17,8 +17,6 @@ che.infra.kubernetes.trust_certs=che.infra.openshift.trust_certs
|
|||
che.infra.kubernetes.bootstrapper.binary_url=che.infra.openshift.bootstrapper.binary_url
|
||||
che.infra.kubernetes.bootstrapper.installer_timeout_sec=che.infra.openshift.bootstrapper.installer_timeout_sec
|
||||
che.infra.kubernetes.bootstrapper.server_check_period_sec=che.infra.openshift.bootstrapper.server_check_period_sec
|
||||
che.infra.kubernetes.pvc.jobs.image=che.infra.openshift.pvc.jobs.image
|
||||
che.infra.kubernetes.pvc.jobs.memorylimit=che.infra.openshift.pvc.jobs.memorylimit
|
||||
che.infra.kubernetes.tls_enabled=che.infra.openshift.tls_enabled
|
||||
che.infra.kubernetes.workspace_sa_cluster_roles=che.infra.kubernetes.cluster_role_name
|
||||
|
||||
|
|
|
|||
|
|
@ -1,60 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2021 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
*
|
||||
* SPDX-License-Identifier: EPL-2.0
|
||||
*
|
||||
* Contributors:
|
||||
* Red Hat, Inc. - initial API and implementation
|
||||
*/
|
||||
package org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc;
|
||||
|
||||
import static org.eclipse.che.api.workspace.shared.Constants.PERSIST_VOLUMES_ATTRIBUTE;
|
||||
|
||||
import java.util.Map;
|
||||
import org.eclipse.che.api.core.model.workspace.Workspace;
|
||||
import org.eclipse.che.api.core.model.workspace.devfile.Devfile;
|
||||
|
||||
public class EphemeralWorkspaceUtility {
|
||||
|
||||
/**
|
||||
* @param workspaceAttributes workspace config or devfile attributes to check is ephemeral mode is
|
||||
* enabled
|
||||
* @return true if `persistVolumes` attribute exists and set to 'false'. In this case regardless
|
||||
* of the PVC strategy, workspace volumes would be created as `emptyDir`. When a workspace Pod
|
||||
* is removed for any reason, the data in the `emptyDir` volume is deleted forever
|
||||
*/
|
||||
public static boolean isEphemeral(Map<String, String> workspaceAttributes) {
|
||||
String persistVolumes = workspaceAttributes.get(PERSIST_VOLUMES_ATTRIBUTE);
|
||||
return "false".equals(persistVolumes);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param workspace workspace to check is ephemeral mode is enabled
|
||||
* @return true if workspace config contains `persistVolumes` attribute which is set to false. In
|
||||
* this case regardless of the PVC strategy, workspace volumes would be created as `emptyDir`.
|
||||
* When a workspace Pod is removed for any reason, the data in the `emptyDir` volume is
|
||||
* deleted forever
|
||||
*/
|
||||
public static boolean isEphemeral(Workspace workspace) {
|
||||
Devfile devfile = workspace.getDevfile();
|
||||
if (devfile != null) {
|
||||
return isEphemeral(devfile.getAttributes());
|
||||
}
|
||||
|
||||
return isEphemeral(workspace.getConfig().getAttributes());
|
||||
}
|
||||
|
||||
/**
|
||||
* Change workspace attributes such that future calls to {@link #isEphemeral(Map)} will return
|
||||
* true.
|
||||
*
|
||||
* @param workspaceAttributes workspace config or devfile attributes to which ephemeral mode
|
||||
* configuration should be provisioned
|
||||
*/
|
||||
public static void makeEphemeral(Map<String, String> workspaceAttributes) {
|
||||
workspaceAttributes.put(PERSIST_VOLUMES_ATTRIBUTE, "false");
|
||||
}
|
||||
}
|
||||
|
|
@ -1,209 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2022 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
*
|
||||
* SPDX-License-Identifier: EPL-2.0
|
||||
*
|
||||
* Contributors:
|
||||
* Red Hat, Inc. - initial API and implementation
|
||||
*/
|
||||
package org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc;
|
||||
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.Constants.CHE_VOLUME_NAME_LABEL;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.Constants.CHE_WORKSPACE_ID_LABEL;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesObjectUtil.newPVC;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesObjectUtil.newVolume;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesObjectUtil.newVolumeMount;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesObjectUtil.putLabel;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.provision.LogsVolumeMachineProvisioner.LOGS_VOLUME_NAME;
|
||||
|
||||
import io.fabric8.kubernetes.api.model.Container;
|
||||
import io.fabric8.kubernetes.api.model.ObjectMeta;
|
||||
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
|
||||
import io.fabric8.kubernetes.api.model.PodSpec;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.Optional;
|
||||
import javax.inject.Inject;
|
||||
import org.eclipse.che.api.core.model.workspace.config.MachineConfig;
|
||||
import org.eclipse.che.api.core.model.workspace.config.Volume;
|
||||
import org.eclipse.che.api.workspace.server.spi.environment.InternalMachineConfig;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.Names;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.environment.KubernetesEnvironment;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.environment.KubernetesEnvironment.PodData;
|
||||
|
||||
/**
|
||||
* Helps to work with {@link PersistentVolumeClaim} and provision them to {@link
|
||||
* KubernetesEnvironment}.
|
||||
*
|
||||
* @author Sergii Leshchenko
|
||||
*/
|
||||
public class PVCProvisioner {
|
||||
|
||||
private final String pvcNamePrefix;
|
||||
private final String pvcQuantity;
|
||||
private final String pvcAccessMode;
|
||||
private final String pvcStorageClassName;
|
||||
private final PodsVolumes podsVolumes;
|
||||
|
||||
@Inject
|
||||
public PVCProvisioner(PodsVolumes podsVolumes) {
|
||||
this.pvcNamePrefix = "TEST";
|
||||
this.pvcQuantity = "test";
|
||||
this.pvcAccessMode = "TEST";
|
||||
this.pvcStorageClassName = "TEST";
|
||||
this.podsVolumes = podsVolumes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts {@link Volume} specified in {@link MachineConfig#getVolumes()} to {@link
|
||||
* PersistentVolumeClaim}s and provision them to {@link KubernetesEnvironment}. The machines
|
||||
* corresponding pods and containers are updated in accordance.
|
||||
*
|
||||
* @param k8sEnv environment to provision
|
||||
* @param workspaceId identifier of workspace to which the specified environment belongs to
|
||||
*/
|
||||
public void convertCheVolumes(KubernetesEnvironment k8sEnv, String workspaceId) {
|
||||
Map<String, PersistentVolumeClaim> volumeName2PVC =
|
||||
groupByVolumeName(k8sEnv.getPersistentVolumeClaims().values());
|
||||
|
||||
for (PodData pod : k8sEnv.getPodsData().values()) {
|
||||
final PodSpec podSpec = pod.getSpec();
|
||||
List<Container> containers = new ArrayList<>();
|
||||
containers.addAll(podSpec.getContainers());
|
||||
containers.addAll(podSpec.getInitContainers());
|
||||
for (Container container : containers) {
|
||||
final String machineName = Names.machineName(pod, container);
|
||||
InternalMachineConfig machineConfig = k8sEnv.getMachines().get(machineName);
|
||||
if (machineConfig == null) {
|
||||
continue;
|
||||
}
|
||||
Map<String, Volume> volumes = machineConfig.getVolumes();
|
||||
addMachineVolumes(workspaceId, k8sEnv, volumeName2PVC, pod, container, volumes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Provision the specified PVCs to the environment.
|
||||
*
|
||||
* <p>Note that:<br>
|
||||
* - PVC is not provisioned if environment already contains PVC for corresponding volume;<br>
|
||||
* - PVC is provisioned with generated unique name;<br>
|
||||
* - corresponding PVC references in Kubernetes Environment are updated during provisioning;<br>
|
||||
*
|
||||
* @param k8sEnv environment to provision
|
||||
* @param toProvision PVCs that should be provisioned to the environment
|
||||
*/
|
||||
public void provision(
|
||||
KubernetesEnvironment k8sEnv, Map<String, PersistentVolumeClaim> toProvision) {
|
||||
final Map<String, PersistentVolumeClaim> volumeName2PVC =
|
||||
groupByVolumeName(k8sEnv.getPersistentVolumeClaims().values());
|
||||
|
||||
// process user-defined PVCs according to unique strategy
|
||||
final Map<String, PersistentVolumeClaim> envClaims = k8sEnv.getPersistentVolumeClaims();
|
||||
for (PersistentVolumeClaim pvc : toProvision.values()) {
|
||||
String originalPVCName = pvc.getMetadata().getName();
|
||||
|
||||
PersistentVolumeClaim existingPVC = volumeName2PVC.get(originalPVCName);
|
||||
|
||||
if (existingPVC != null) {
|
||||
// Replace pvc in environment with existing. Fix the references in Pods
|
||||
podsVolumes.changePVCReferences(
|
||||
k8sEnv.getPodsData().values(), originalPVCName, existingPVC.getMetadata().getName());
|
||||
} else {
|
||||
// there is no the corresponding existing pvc
|
||||
// new one should be created with generated name
|
||||
putLabel(pvc, CHE_VOLUME_NAME_LABEL, originalPVCName);
|
||||
|
||||
final String uniqueName = Names.generateName(pvcNamePrefix + '-');
|
||||
pvc.getMetadata().setName(uniqueName);
|
||||
envClaims.put(uniqueName, pvc);
|
||||
|
||||
volumeName2PVC.put(originalPVCName, pvc);
|
||||
podsVolumes.changePVCReferences(k8sEnv.getPodsData().values(), originalPVCName, uniqueName);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Groups list of given PVCs by volume name. The result may be used for easy accessing to PVCs by
|
||||
* Che Volume name.
|
||||
*/
|
||||
private Map<String, PersistentVolumeClaim> groupByVolumeName(
|
||||
Collection<PersistentVolumeClaim> pvcs) {
|
||||
final Map<String, PersistentVolumeClaim> grouped = new HashMap<>();
|
||||
for (PersistentVolumeClaim pvc : pvcs) {
|
||||
final ObjectMeta metadata = pvc.getMetadata();
|
||||
final String volumeName;
|
||||
if (metadata.getLabels() != null
|
||||
&& (volumeName = metadata.getLabels().get(CHE_VOLUME_NAME_LABEL)) != null) {
|
||||
grouped.put(volumeName, pvc);
|
||||
} else {
|
||||
grouped.put(metadata.getName(), pvc);
|
||||
putLabel(metadata, CHE_VOLUME_NAME_LABEL, metadata.getName());
|
||||
}
|
||||
}
|
||||
return grouped;
|
||||
}
|
||||
|
||||
private void addMachineVolumes(
|
||||
String workspaceId,
|
||||
KubernetesEnvironment k8sEnv,
|
||||
Map<String, PersistentVolumeClaim> volumeName2PVC,
|
||||
PodData pod,
|
||||
Container container,
|
||||
Map<String, Volume> volumes) {
|
||||
if (volumes.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (Entry<String, Volume> volumeEntry : volumes.entrySet()) {
|
||||
final String volumePath = volumeEntry.getValue().getPath();
|
||||
final String volumeName =
|
||||
LOGS_VOLUME_NAME.equals(volumeEntry.getKey())
|
||||
? volumeEntry.getKey() + '-' + pod.getMetadata().getName()
|
||||
: volumeEntry.getKey();
|
||||
final PersistentVolumeClaim pvc;
|
||||
// checks whether PVC for given workspace and volume present in environment
|
||||
if (volumeName2PVC.containsKey(volumeName)) {
|
||||
pvc = volumeName2PVC.get(volumeName);
|
||||
}
|
||||
// when PVC is not found in environment then create new one
|
||||
else {
|
||||
final String uniqueName = Names.generateName(pvcNamePrefix);
|
||||
pvc = newPVC(uniqueName, pvcAccessMode, pvcQuantity, pvcStorageClassName);
|
||||
putLabel(pvc, CHE_WORKSPACE_ID_LABEL, workspaceId);
|
||||
putLabel(pvc, CHE_VOLUME_NAME_LABEL, volumeName);
|
||||
k8sEnv.getPersistentVolumeClaims().put(uniqueName, pvc);
|
||||
volumeName2PVC.put(volumeName, pvc);
|
||||
}
|
||||
|
||||
// binds pvc to pod and container
|
||||
String pvcName = pvc.getMetadata().getName();
|
||||
PodSpec podSpec = pod.getSpec();
|
||||
Optional<io.fabric8.kubernetes.api.model.Volume> volumeOpt =
|
||||
podSpec.getVolumes().stream()
|
||||
.filter(
|
||||
volume ->
|
||||
volume.getPersistentVolumeClaim() != null
|
||||
&& pvcName.equals(volume.getPersistentVolumeClaim().getClaimName()))
|
||||
.findAny();
|
||||
io.fabric8.kubernetes.api.model.Volume podVolume;
|
||||
if (volumeOpt.isPresent()) {
|
||||
podVolume = volumeOpt.get();
|
||||
} else {
|
||||
podVolume = newVolume(pvcName, pvcName);
|
||||
podSpec.getVolumes().add(podVolume);
|
||||
}
|
||||
|
||||
container.getVolumeMounts().add(newVolumeMount(podVolume.getName(), volumePath, ""));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,470 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2021 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
*
|
||||
* SPDX-License-Identifier: EPL-2.0
|
||||
*
|
||||
* Contributors:
|
||||
* Red Hat, Inc. - initial API and implementation
|
||||
*/
|
||||
package org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc;
|
||||
|
||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesObjectUtil.newVolume;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesObjectUtil.newVolumeMount;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Predicate;
|
||||
import com.google.common.base.Strings;
|
||||
import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
import io.fabric8.kubernetes.api.model.Container;
|
||||
import io.fabric8.kubernetes.api.model.ContainerBuilder;
|
||||
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
|
||||
import io.fabric8.kubernetes.api.model.Pod;
|
||||
import io.fabric8.kubernetes.api.model.PodBuilder;
|
||||
import io.fabric8.kubernetes.api.model.PodStatus;
|
||||
import jakarta.annotation.PreDestroy;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CancellationException;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.stream.Stream;
|
||||
import javax.inject.Inject;
|
||||
import javax.inject.Named;
|
||||
import javax.inject.Singleton;
|
||||
import org.eclipse.che.api.core.model.workspace.runtime.RuntimeIdentity;
|
||||
import org.eclipse.che.api.workspace.server.spi.InfrastructureException;
|
||||
import org.eclipse.che.commons.lang.concurrent.LoggingUncaughtExceptionHandler;
|
||||
import org.eclipse.che.commons.lang.concurrent.ThreadLocalPropagateContext;
|
||||
import org.eclipse.che.commons.observability.ExecutorServiceWrapper;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesDeployments;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesNamespace;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesNamespaceFactory;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.event.PodEvent;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.log.LogWatchTimeouts;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.log.LogWatcher;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.log.PodLogToEventPublisher;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.NodeSelectorProvisioner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.SecurityContextProvisioner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.TolerationsProvisioner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.util.Containers;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.util.RuntimeEventsPublisher;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* Helps to execute commands needed for workspace PVC preparation and cleanup.
|
||||
*
|
||||
* <p>Creates a short-lived Pod based on CentOS image which mounts a specified PVC and executes a
|
||||
* command (either {@code mkdir -p <path>} or {@code rm -rf <path>}). Reports back whether the pod
|
||||
* succeeded or failed. Supports multiple paths for one command.
|
||||
*
|
||||
* <p>Note that the commands execution is needed only for {@link CommonPVCStrategy}.
|
||||
*
|
||||
* @author amisevsk
|
||||
* @author Anton Korneta
|
||||
*/
|
||||
@Singleton
|
||||
public class PVCSubPathHelper {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(PVCSubPathHelper.class);
|
||||
private static final JobFinishedPredicate POD_PREDICATE = new JobFinishedPredicate();
|
||||
|
||||
static final int COUNT_THREADS = 4;
|
||||
static final int WAIT_POD_TIMEOUT_MIN = 5;
|
||||
|
||||
static final String[] RM_COMMAND_BASE = new String[] {"rm", "-rf"};
|
||||
static final String[] MKDIR_COMMAND_BASE = new String[] {"mkdir", "-m", "777", "-p"};
|
||||
|
||||
static final String POD_RESTART_POLICY = "Never";
|
||||
static final String POD_PHASE_SUCCEEDED = "Succeeded";
|
||||
static final String POD_PHASE_FAILED = "Failed";
|
||||
static final String POD_EVENT_FAILED = "Failed";
|
||||
static final String POD_EVENT_FAILED_SCHEDULING = "FailedScheduling";
|
||||
static final String POD_EVENT_FAILED_MOUNT = "FailedMount";
|
||||
static final String PVC_PHASE_TERMINATING = "Terminating";
|
||||
static final String JOB_MOUNT_PATH = "/tmp/job_mount";
|
||||
|
||||
private final String jobImage;
|
||||
private final String jobMemoryLimit;
|
||||
private final String imagePullPolicy;
|
||||
private final KubernetesNamespaceFactory factory;
|
||||
private final ExecutorService executor;
|
||||
private final RuntimeEventsPublisher eventsPublisher;
|
||||
|
||||
private final SecurityContextProvisioner securityContextProvisioner;
|
||||
private final NodeSelectorProvisioner nodeSelectorProvisioner;
|
||||
private final TolerationsProvisioner tolerationsProvisioner;
|
||||
|
||||
@Inject
|
||||
PVCSubPathHelper(
|
||||
@Named("che.infra.kubernetes.pvc.jobs.memorylimit") String jobMemoryLimit,
|
||||
@Named("che.infra.kubernetes.pvc.jobs.image") String jobImage,
|
||||
@Named("che.infra.kubernetes.pvc.jobs.image.pull_policy") String imagePullPolicy,
|
||||
KubernetesNamespaceFactory factory,
|
||||
SecurityContextProvisioner securityContextProvisioner,
|
||||
NodeSelectorProvisioner nodeSelectorProvisioner,
|
||||
TolerationsProvisioner tolerationsProvisioner,
|
||||
ExecutorServiceWrapper executorServiceWrapper,
|
||||
RuntimeEventsPublisher eventPublisher) {
|
||||
this.jobMemoryLimit = jobMemoryLimit;
|
||||
this.jobImage = jobImage;
|
||||
this.imagePullPolicy = imagePullPolicy;
|
||||
this.factory = factory;
|
||||
this.securityContextProvisioner = securityContextProvisioner;
|
||||
this.nodeSelectorProvisioner = nodeSelectorProvisioner;
|
||||
this.tolerationsProvisioner = tolerationsProvisioner;
|
||||
this.eventsPublisher = eventPublisher;
|
||||
this.executor =
|
||||
executorServiceWrapper.wrap(
|
||||
Executors.newFixedThreadPool(
|
||||
COUNT_THREADS,
|
||||
new ThreadFactoryBuilder()
|
||||
.setNameFormat("PVCSubPathHelper-ThreadPool-%d")
|
||||
.setUncaughtExceptionHandler(LoggingUncaughtExceptionHandler.getInstance())
|
||||
.setDaemon(false)
|
||||
.build()),
|
||||
PVCSubPathHelper.class.getName());
|
||||
}
|
||||
|
||||
/**
|
||||
* Performs create workspace directories job by given paths and waits until it finished.
|
||||
*
|
||||
* @param workspaceId workspace identifier
|
||||
* @param dirs workspace directories to create
|
||||
*/
|
||||
void createDirs(
|
||||
RuntimeIdentity identity,
|
||||
String workspaceId,
|
||||
String pvcName,
|
||||
Map<String, String> startOptions,
|
||||
String... dirs) {
|
||||
LOG.debug(
|
||||
"Preparing PVC `{}` for workspace `{}`. Directories to create: {}",
|
||||
pvcName,
|
||||
workspaceId,
|
||||
Arrays.toString(dirs));
|
||||
execute(identity, workspaceId, pvcName, MKDIR_COMMAND_BASE, startOptions, dirs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously starts a job for removing workspace directories by given paths.
|
||||
*
|
||||
* @param workspaceId workspace identifier
|
||||
* @param namespace
|
||||
* @param dirs workspace directories to remove
|
||||
*/
|
||||
CompletableFuture<Void> removeDirsAsync(
|
||||
String workspaceId, String namespace, String pvcName, String... dirs) {
|
||||
LOG.debug(
|
||||
"Removing files in PVC `{}` of workspace `{}`. Directories to remove: {}",
|
||||
pvcName,
|
||||
workspaceId,
|
||||
Arrays.toString(dirs));
|
||||
return CompletableFuture.runAsync(
|
||||
ThreadLocalPropagateContext.wrap(
|
||||
() -> execute(workspaceId, namespace, pvcName, RM_COMMAND_BASE, true, dirs)),
|
||||
executor);
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
void execute(
|
||||
RuntimeIdentity identity,
|
||||
String workspaceId,
|
||||
String pvcName,
|
||||
String[] commandBase,
|
||||
Map<String, String> startOptions,
|
||||
String... arguments) {
|
||||
execute(
|
||||
identity,
|
||||
workspaceId,
|
||||
identity.getInfrastructureNamespace(),
|
||||
pvcName,
|
||||
commandBase,
|
||||
startOptions,
|
||||
false,
|
||||
arguments);
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
void execute(
|
||||
String workspaceId,
|
||||
String namespace,
|
||||
String pvcName,
|
||||
String[] commandBase,
|
||||
boolean watchFailureEvents,
|
||||
String... arguments) {
|
||||
execute(
|
||||
null,
|
||||
workspaceId,
|
||||
namespace,
|
||||
pvcName,
|
||||
commandBase,
|
||||
Collections.emptyMap(),
|
||||
watchFailureEvents,
|
||||
arguments);
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes the job with the specified arguments.
|
||||
*
|
||||
* @param namespace
|
||||
* @param commandBase the command base to execute
|
||||
* @param arguments the list of arguments for the specified job
|
||||
*/
|
||||
@VisibleForTesting
|
||||
void execute(
|
||||
String workspaceId,
|
||||
String namespace,
|
||||
String pvcName,
|
||||
String[] commandBase,
|
||||
String... arguments) {
|
||||
execute(
|
||||
null,
|
||||
workspaceId,
|
||||
namespace,
|
||||
pvcName,
|
||||
commandBase,
|
||||
Collections.emptyMap(),
|
||||
false,
|
||||
arguments);
|
||||
}
|
||||
|
||||
private void execute(
|
||||
RuntimeIdentity identity,
|
||||
String workspaceId,
|
||||
String namespace,
|
||||
String pvcName,
|
||||
String[] commandBase,
|
||||
Map<String, String> startOptions,
|
||||
boolean watchFailureEvents,
|
||||
String... arguments) {
|
||||
final String jobName = commandBase[0];
|
||||
final String podName = jobName + '-' + workspaceId;
|
||||
final String[] command = buildCommand(commandBase, arguments);
|
||||
final Pod pod = newPod(podName, pvcName, command);
|
||||
securityContextProvisioner.provision(pod.getSpec());
|
||||
nodeSelectorProvisioner.provision(pod.getSpec());
|
||||
tolerationsProvisioner.provision(pod.getSpec());
|
||||
|
||||
KubernetesDeployments deployments = null;
|
||||
try {
|
||||
KubernetesNamespace ns = factory.access(workspaceId, namespace);
|
||||
|
||||
if (!checkPVCExistsAndNotTerminating(ns, pvcName)) {
|
||||
return;
|
||||
}
|
||||
|
||||
deployments = ns.deployments();
|
||||
deployments.create(pod);
|
||||
watchLogsIfDebugEnabled(deployments, pod, identity, startOptions);
|
||||
|
||||
PodStatus finishedStatus = waitPodStatus(podName, deployments, watchFailureEvents);
|
||||
if (POD_PHASE_FAILED.equals(finishedStatus.getPhase())) {
|
||||
String logs = deployments.getPodLogs(podName);
|
||||
LOG.error(
|
||||
"Job command '{}' execution is failed. Logs: {}",
|
||||
Arrays.toString(command),
|
||||
Strings.nullToEmpty(logs).replace("\n", " \\n")); // Force logs onto one line
|
||||
}
|
||||
} catch (InfrastructureException ex) {
|
||||
LOG.error(
|
||||
"Unable to perform '{}' command for the workspace '{}' cause: '{}'",
|
||||
Arrays.toString(command),
|
||||
workspaceId,
|
||||
ex.getMessage());
|
||||
deployments.stopWatch(true);
|
||||
} finally {
|
||||
if (deployments != null) {
|
||||
deployments.stopWatch();
|
||||
try {
|
||||
deployments.delete(podName);
|
||||
} catch (InfrastructureException ignored) {
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if specified PVC exists and is not in a terminating phase.
|
||||
*
|
||||
* @param namespace the namespace to check the PVC for
|
||||
* @param pvcName the name of the PVC to check for
|
||||
* @return true if if specified PVC exists and is not in a terminating phase
|
||||
*/
|
||||
private boolean checkPVCExistsAndNotTerminating(KubernetesNamespace namespace, String pvcName)
|
||||
throws InfrastructureException {
|
||||
for (PersistentVolumeClaim pvc : namespace.persistentVolumeClaims().get()) {
|
||||
if (pvcName.equals(pvc.getMetadata().getName())) {
|
||||
return !PVC_PHASE_TERMINATING.equals(pvc.getStatus().getPhase());
|
||||
}
|
||||
}
|
||||
// PVC does not exist
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the PodStatus of a specified pod after waiting for the pod to terminate. If
|
||||
* watchFailureEvents is true and a failure event is detected while waiting, this method will
|
||||
* cancel the wait, delete the pod and throw an InfrastructureException.
|
||||
*
|
||||
* @param podName the name of the pod to wait for
|
||||
* @param deployments the KubernetesDeployments object used to create the pod
|
||||
* @param watchFailureEvents true if failure events should be watched
|
||||
* @throws InfrastructureException
|
||||
*/
|
||||
private PodStatus waitPodStatus(
|
||||
String podName, KubernetesDeployments deployments, boolean watchFailureEvents)
|
||||
throws InfrastructureException {
|
||||
|
||||
CompletableFuture<Pod> podFuture = deployments.waitAsync(podName, POD_PREDICATE::apply);
|
||||
|
||||
if (watchFailureEvents) {
|
||||
watchFailureEvents(podName, deployments, podFuture);
|
||||
}
|
||||
|
||||
try {
|
||||
return podFuture.get(WAIT_POD_TIMEOUT_MIN, TimeUnit.MINUTES).getStatus();
|
||||
} catch (ExecutionException e) {
|
||||
throw new InfrastructureException(e.getCause().getMessage(), e);
|
||||
} catch (TimeoutException e) {
|
||||
throw new InfrastructureException("Waiting for pod '" + podName + "' reached timeout");
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
throw new InfrastructureException("Waiting for pod '" + podName + "' was interrupted");
|
||||
} catch (CancellationException e) {
|
||||
throw new InfrastructureException("Cancelled waiting for pod: '" + podName + "'");
|
||||
}
|
||||
}
|
||||
|
||||
private void watchFailureEvents(
|
||||
String podName, KubernetesDeployments deployments, CompletableFuture<Pod> futureToCancel)
|
||||
throws InfrastructureException {
|
||||
deployments.watchEvents(
|
||||
event -> {
|
||||
if (podName.equals(event.getPodName()) && isPodFailureEvent(event)) {
|
||||
try {
|
||||
LOG.debug(
|
||||
"Deleting pod: '{}' due to failure event: '{}'", podName, event.getMessage());
|
||||
futureToCancel.cancel(true);
|
||||
deployments.delete(event.getPodName());
|
||||
} catch (InfrastructureException ex) {
|
||||
LOG.error("Unable to delete failing pod: '{}' cause: '{}'", podName, ex.getMessage());
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private boolean isPodFailureEvent(PodEvent event) {
|
||||
return POD_EVENT_FAILED.equals(event.getReason())
|
||||
|| POD_EVENT_FAILED_SCHEDULING.equals(event.getReason())
|
||||
|| POD_EVENT_FAILED_MOUNT.equals(event.getReason());
|
||||
}
|
||||
|
||||
private void watchLogsIfDebugEnabled(
|
||||
KubernetesDeployments deployment,
|
||||
Pod pod,
|
||||
RuntimeIdentity identity,
|
||||
Map<String, String> startOptions)
|
||||
throws InfrastructureException {
|
||||
if (LogWatcher.shouldWatchLogs(startOptions)) {
|
||||
deployment.watchLogs(
|
||||
new PodLogToEventPublisher(eventsPublisher, identity),
|
||||
eventsPublisher,
|
||||
LogWatchTimeouts.AGGRESSIVE,
|
||||
Collections.singleton(pod.getMetadata().getName()),
|
||||
LogWatcher.getLogLimitBytes(startOptions));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds the command by given base and paths.
|
||||
*
|
||||
* <p>Command is consists of base(e.g. rm -rf) and list of directories which are modified with
|
||||
* mount path.
|
||||
*
|
||||
* @param base command base
|
||||
* @param dirs the paths which are used as arguments for the command base
|
||||
* @return complete command with given arguments
|
||||
*/
|
||||
@VisibleForTesting
|
||||
String[] buildCommand(String[] base, String... dirs) {
|
||||
return Stream.concat(
|
||||
Arrays.stream(base),
|
||||
Arrays.stream(dirs)
|
||||
.map(dir -> JOB_MOUNT_PATH + (dir.startsWith("/") ? dir : '/' + dir)))
|
||||
.toArray(String[]::new);
|
||||
}
|
||||
|
||||
@PreDestroy
|
||||
void shutdown() {
|
||||
if (!executor.isShutdown()) {
|
||||
executor.shutdown();
|
||||
try {
|
||||
if (!executor.awaitTermination(30, SECONDS)) {
|
||||
executor.shutdownNow();
|
||||
if (!executor.awaitTermination(60, SECONDS))
|
||||
LOG.error("Couldn't shutdown PVCSubPathHelper thread pool");
|
||||
}
|
||||
} catch (InterruptedException ignored) {
|
||||
executor.shutdownNow();
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
LOG.info("PVCSubPathHelper thread pool is terminated");
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns new instance of {@link Pod} with given name and command. */
|
||||
private Pod newPod(String podName, String pvcName, String[] command) {
|
||||
final Container container =
|
||||
new ContainerBuilder()
|
||||
.withName(podName)
|
||||
.withImage(jobImage)
|
||||
.withImagePullPolicy(imagePullPolicy)
|
||||
.withCommand(command)
|
||||
.withVolumeMounts(newVolumeMount(pvcName, JOB_MOUNT_PATH, null))
|
||||
.withNewResources()
|
||||
.endResources()
|
||||
.build();
|
||||
Containers.addRamLimit(container, jobMemoryLimit);
|
||||
Containers.addRamRequest(container, jobMemoryLimit);
|
||||
return new PodBuilder()
|
||||
.withNewMetadata()
|
||||
.withName(podName)
|
||||
.endMetadata()
|
||||
.withNewSpec()
|
||||
.withContainers(container)
|
||||
.withVolumes(newVolume(pvcName, pvcName))
|
||||
.withRestartPolicy(POD_RESTART_POLICY)
|
||||
.endSpec()
|
||||
.build();
|
||||
}
|
||||
|
||||
/** Checks whether pod is Failed or Successfully finished command execution */
|
||||
static class JobFinishedPredicate implements Predicate<Pod> {
|
||||
@Override
|
||||
public boolean apply(Pod pod) {
|
||||
if (pod.getStatus() == null) {
|
||||
return false;
|
||||
}
|
||||
switch (pod.getStatus().getPhase()) {
|
||||
case POD_PHASE_FAILED:
|
||||
// fall through
|
||||
case POD_PHASE_SUCCEEDED:
|
||||
// job is finished.
|
||||
return true;
|
||||
default:
|
||||
// job is not finished.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,95 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2021 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
*
|
||||
* SPDX-License-Identifier: EPL-2.0
|
||||
*
|
||||
* Contributors:
|
||||
* Red Hat, Inc. - initial API and implementation
|
||||
*/
|
||||
package org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc;
|
||||
|
||||
import io.fabric8.kubernetes.api.model.Volume;
|
||||
import io.fabric8.kubernetes.api.model.VolumeBuilder;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Stream;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.environment.KubernetesEnvironment.PodData;
|
||||
|
||||
/**
|
||||
* Helps to works with Pods Volumes, like reference them to another PVC.
|
||||
*
|
||||
* @author Sergii Leshchenko
|
||||
*/
|
||||
public class PodsVolumes {
|
||||
|
||||
/**
|
||||
* Changes all pods volumes witch referenced the specified PVC to reference new PVC.
|
||||
*
|
||||
* @param pods pods to change
|
||||
* @param currentPVCName current PVC name for filtering pods volumes
|
||||
* @param newPVCName new PVC name that should be used
|
||||
*/
|
||||
public void changePVCReferences(
|
||||
Collection<PodData> pods, String currentPVCName, String newPVCName) {
|
||||
pods.stream()
|
||||
.flatMap(p -> p.getSpec().getVolumes().stream())
|
||||
.filter(
|
||||
v ->
|
||||
v.getPersistentVolumeClaim() != null
|
||||
&& v.getPersistentVolumeClaim().getClaimName().equals(currentPVCName))
|
||||
.forEach(v -> v.getPersistentVolumeClaim().setClaimName(newPVCName));
|
||||
}
|
||||
|
||||
/**
|
||||
* Replaces all pods PVC sourced volumes with the specified one.
|
||||
*
|
||||
* @param pods pods to change
|
||||
* @param commonPVCName PVC name that should be referenced in all existing PVC sources volumes
|
||||
*/
|
||||
public void replacePVCVolumesWithCommon(Map<String, PodData> pods, String commonPVCName) {
|
||||
for (PodData pod : pods.values()) {
|
||||
Set<String> pvcSourcedVolumes = reducePVCSourcedVolumes(pod.getSpec().getVolumes());
|
||||
|
||||
if (pvcSourcedVolumes.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// add common PVC sourced volume instead of removed
|
||||
pod.getSpec()
|
||||
.getVolumes()
|
||||
.add(
|
||||
new VolumeBuilder()
|
||||
.withName(commonPVCName)
|
||||
.withNewPersistentVolumeClaim()
|
||||
.withClaimName(commonPVCName)
|
||||
.endPersistentVolumeClaim()
|
||||
.build());
|
||||
|
||||
Stream.concat(
|
||||
pod.getSpec().getContainers().stream(), pod.getSpec().getInitContainers().stream())
|
||||
.flatMap(c -> c.getVolumeMounts().stream())
|
||||
.filter(vm -> pvcSourcedVolumes.contains(vm.getName()))
|
||||
.forEach(vm -> vm.setName(commonPVCName));
|
||||
}
|
||||
}
|
||||
|
||||
private static Set<String> reducePVCSourcedVolumes(List<Volume> volumes) {
|
||||
Set<String> pvcSourcedVolumes = new HashSet<>();
|
||||
Iterator<Volume> volumeIterator = volumes.iterator();
|
||||
while (volumeIterator.hasNext()) {
|
||||
Volume volume = volumeIterator.next();
|
||||
if (volume.getPersistentVolumeClaim() != null) {
|
||||
pvcSourcedVolumes.add(volume.getName());
|
||||
volumeIterator.remove();
|
||||
}
|
||||
}
|
||||
return pvcSourcedVolumes;
|
||||
}
|
||||
}
|
||||
|
|
@ -1,117 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2021 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
*
|
||||
* SPDX-License-Identifier: EPL-2.0
|
||||
*
|
||||
* Contributors:
|
||||
* Red Hat, Inc. - initial API and implementation
|
||||
*/
|
||||
package org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc;
|
||||
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.Constants.CHE_VOLUME_NAME_LABEL;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.provision.LogsVolumeMachineProvisioner.LOGS_VOLUME_NAME;
|
||||
|
||||
import com.google.common.base.Strings;
|
||||
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
|
||||
import io.fabric8.kubernetes.api.model.VolumeMount;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Stream;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.Names;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.environment.KubernetesEnvironment;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.environment.KubernetesEnvironment.PodData;
|
||||
|
||||
/**
|
||||
* Is responsible for prefixing sub-paths of volume mounts and should be used by all PVCs
|
||||
* strategies.
|
||||
*
|
||||
* @author Sergii Leshchenko
|
||||
*/
|
||||
public class SubPathPrefixes {
|
||||
|
||||
/**
|
||||
* Prefixes volumes mounts of containers inside of the specified kubernetes environment.
|
||||
*
|
||||
* <p>Subpaths have the following format: '{workspaceId}/{Che Volume name|PVC name}'.<br>
|
||||
* Where Che Volume is used if it is present in PVC labels, otherwise PVC name will be used.<br>
|
||||
* Note that logs volume has the special format: '{workspaceId}/{volumeName}/{machineName}'. It is
|
||||
* done in this way to avoid conflicts e.g. two identical agents inside different machines produce
|
||||
* the same log file.
|
||||
*
|
||||
* @param k8sEnv environment to process
|
||||
* @param workspaceId workspace id that should be used as prefix
|
||||
*/
|
||||
public void prefixVolumeMountsSubpaths(KubernetesEnvironment k8sEnv, String workspaceId) {
|
||||
for (PodData pod : k8sEnv.getPodsData().values()) {
|
||||
Map<String, String> volumeToCheVolumeName = new HashMap<>();
|
||||
for (io.fabric8.kubernetes.api.model.Volume volume : pod.getSpec().getVolumes()) {
|
||||
if (volume.getPersistentVolumeClaim() == null) {
|
||||
continue;
|
||||
}
|
||||
PersistentVolumeClaim pvc =
|
||||
k8sEnv
|
||||
.getPersistentVolumeClaims()
|
||||
.get(volume.getPersistentVolumeClaim().getClaimName());
|
||||
|
||||
String cheVolumeName = pvc.getMetadata().getLabels().get(CHE_VOLUME_NAME_LABEL);
|
||||
if (cheVolumeName == null) {
|
||||
cheVolumeName = pvc.getMetadata().getName();
|
||||
pvc.getMetadata().getLabels().put(CHE_VOLUME_NAME_LABEL, cheVolumeName);
|
||||
}
|
||||
volumeToCheVolumeName.put(volume.getName(), cheVolumeName);
|
||||
}
|
||||
|
||||
if (volumeToCheVolumeName.isEmpty()) {
|
||||
// Pod does not have any volume that references PVC
|
||||
continue;
|
||||
}
|
||||
|
||||
Stream.concat(
|
||||
pod.getSpec().getContainers().stream(), pod.getSpec().getInitContainers().stream())
|
||||
.forEach(
|
||||
c -> {
|
||||
for (VolumeMount volumeMount : c.getVolumeMounts()) {
|
||||
String pvcName = volumeToCheVolumeName.get(volumeMount.getName());
|
||||
if (pvcName == null) {
|
||||
// should not happens since Volume<>PVC links are checked during recipe
|
||||
// validation
|
||||
continue;
|
||||
}
|
||||
|
||||
String volumeSubPath =
|
||||
getVolumeMountSubpath(
|
||||
volumeMount, pvcName, workspaceId, Names.machineName(pod, c));
|
||||
volumeMount.setSubPath(volumeSubPath);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/** Get sub-path for particular Volume Mount in a particular workspace */
|
||||
private String getVolumeMountSubpath(
|
||||
VolumeMount volumeMount, String volumeName, String workspaceId, String machineName) {
|
||||
String volumeMountSubPath = Strings.nullToEmpty(volumeMount.getSubPath());
|
||||
if (!volumeMountSubPath.startsWith("/")) {
|
||||
volumeMountSubPath = '/' + volumeMountSubPath;
|
||||
}
|
||||
|
||||
return getVolumeSubpath(workspaceId, volumeName, machineName) + volumeMountSubPath;
|
||||
}
|
||||
|
||||
private String getVolumeSubpath(String workspaceId, String volumeName, String machineName) {
|
||||
// logs must be located inside the folder related to the machine because few machines can
|
||||
// contain the identical agents and in this case, a conflict is possible.
|
||||
if (LOGS_VOLUME_NAME.equals(volumeName)) {
|
||||
return getWorkspaceSubPath(workspaceId) + '/' + volumeName + '/' + machineName;
|
||||
}
|
||||
return getWorkspaceSubPath(workspaceId) + '/' + volumeName;
|
||||
}
|
||||
|
||||
/** Get sub-path that holds all the volumes of a particular workspace */
|
||||
public String getWorkspaceSubPath(String workspaceId) {
|
||||
return workspaceId;
|
||||
}
|
||||
}
|
||||
|
|
@ -1,64 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2022 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
*
|
||||
* SPDX-License-Identifier: EPL-2.0
|
||||
*
|
||||
* Contributors:
|
||||
* Red Hat, Inc. - initial API and implementation
|
||||
*/
|
||||
package org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc;
|
||||
|
||||
import com.google.inject.Inject;
|
||||
import com.google.inject.Singleton;
|
||||
import org.eclipse.che.api.core.model.workspace.Workspace;
|
||||
import org.eclipse.che.api.core.notification.EventService;
|
||||
import org.eclipse.che.api.workspace.server.spi.InfrastructureException;
|
||||
import org.eclipse.che.api.workspace.shared.event.WorkspaceRemovedEvent;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* Cleans the workspace related Kubernetes resources after {@link WorkspaceRemovedEvent}.
|
||||
*
|
||||
* <p>Note that depending on a configuration different types of cleaners may be chosen. In case of
|
||||
* configuration when new Kubernetes namespace created for each workspace, the whole namespace will
|
||||
* be removed, after workspace removal.
|
||||
*
|
||||
* @author Anton Korneta
|
||||
*/
|
||||
@Singleton
|
||||
public class WorkspacePVCCleaner {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(WorkspacePVCCleaner.class);
|
||||
|
||||
private final boolean pvcEnabled;
|
||||
private final WorkspaceVolumesStrategy strategy;
|
||||
|
||||
@Inject
|
||||
public WorkspacePVCCleaner(WorkspaceVolumesStrategy pvcStrategy) {
|
||||
this.pvcEnabled = false;
|
||||
this.strategy = pvcStrategy;
|
||||
}
|
||||
|
||||
@Inject
|
||||
public void subscribe(EventService eventService) {
|
||||
if (pvcEnabled) {
|
||||
eventService.subscribe(
|
||||
event -> {
|
||||
final Workspace workspace = event.getWorkspace();
|
||||
try {
|
||||
strategy.cleanup(workspace);
|
||||
} catch (InfrastructureException ex) {
|
||||
LOG.error(
|
||||
"Failed to cleanup workspace '{}' data. Cause: {}",
|
||||
workspace.getId(),
|
||||
ex.getMessage());
|
||||
}
|
||||
},
|
||||
WorkspaceRemovedEvent.class);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,63 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2021 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
*
|
||||
* SPDX-License-Identifier: EPL-2.0
|
||||
*
|
||||
* Contributors:
|
||||
* Red Hat, Inc. - initial API and implementation
|
||||
*/
|
||||
package org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc;
|
||||
|
||||
import java.util.Map;
|
||||
import org.eclipse.che.api.core.model.workspace.Workspace;
|
||||
import org.eclipse.che.api.core.model.workspace.runtime.RuntimeIdentity;
|
||||
import org.eclipse.che.api.workspace.server.spi.InfrastructureException;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.environment.KubernetesEnvironment;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.ConfigurationProvisioner;
|
||||
|
||||
/**
|
||||
* Defines a basic set of operations for workspace volume provisioning strategies.
|
||||
*
|
||||
* @author Anton Korneta
|
||||
*/
|
||||
public interface WorkspaceVolumesStrategy extends ConfigurationProvisioner {
|
||||
|
||||
/**
|
||||
* Configures the workspace PVCs, volumes, claim bindings with a strategy specific options.
|
||||
*
|
||||
* @param k8sEnv Kubernetes environment
|
||||
* @param identity runtime identity
|
||||
* @throws InfrastructureException when any error occurs while provisioning volumes
|
||||
*/
|
||||
@Override
|
||||
void provision(KubernetesEnvironment k8sEnv, RuntimeIdentity identity)
|
||||
throws InfrastructureException;
|
||||
|
||||
/**
|
||||
* Prepares volumes for backup of workspace data on a specific machine in a strategy specific way.
|
||||
* Note that this step, depending on the strategy, may take some time.
|
||||
*
|
||||
* @param k8sEnv Kubernetes environment that changes as a result of preparation
|
||||
* @param identity the target into which the workspace is being provisioned and where the volumes
|
||||
* will be prepared.
|
||||
* @param timeoutMillis timeout in milliseconds
|
||||
* @throws InfrastructureException when any error while preparation occurs
|
||||
*/
|
||||
void prepare(
|
||||
KubernetesEnvironment k8sEnv,
|
||||
RuntimeIdentity identity,
|
||||
long timeoutMillis,
|
||||
Map<String, String> startOptions)
|
||||
throws InfrastructureException;
|
||||
|
||||
/**
|
||||
* Cleanups workspace backed up data in a strategy specific way.
|
||||
*
|
||||
* @param workspace the workspace for which cleanup will be performed
|
||||
* @throws InfrastructureException when any error while cleanup occurs
|
||||
*/
|
||||
void cleanup(Workspace workspace) throws InfrastructureException;
|
||||
}
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2021 Red Hat, Inc.
|
||||
* Copyright (c) 2012-2022 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
|
|
@ -11,12 +11,7 @@
|
|||
*/
|
||||
package org.eclipse.che.workspace.infrastructure.kubernetes.provision;
|
||||
|
||||
import static java.lang.Boolean.parseBoolean;
|
||||
import static org.eclipse.che.api.workspace.shared.Constants.ASYNC_PERSIST_ATTRIBUTE;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.EphemeralWorkspaceUtility.isEphemeral;
|
||||
|
||||
import io.fabric8.kubernetes.api.model.PodSpec;
|
||||
import java.util.Map;
|
||||
import javax.inject.Inject;
|
||||
import javax.inject.Named;
|
||||
import org.eclipse.che.api.core.model.workspace.runtime.RuntimeIdentity;
|
||||
|
|
@ -75,10 +70,7 @@ public class PodTerminationGracePeriodProvisioner implements ConfigurationProvis
|
|||
}
|
||||
|
||||
private long getGraceTerminationPeriodSec(KubernetesEnvironment k8sEnv) {
|
||||
Map<String, String> attributes = k8sEnv.getAttributes();
|
||||
if (isEphemeral(attributes) && parseBoolean(attributes.get(ASYNC_PERSIST_ATTRIBUTE))) {
|
||||
return GRACE_TERMINATION_PERIOD_ASYNC_STORAGE_WS_SEC;
|
||||
}
|
||||
// TODO: Should probably remove this class
|
||||
return graceTerminationPeriodSec;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -29,7 +29,6 @@ import org.eclipse.che.workspace.infrastructure.kubernetes.StartSynchronizer;
|
|||
import org.eclipse.che.workspace.infrastructure.kubernetes.environment.KubernetesEnvironment;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesNamespace;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesNamespaceFactory;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.EphemeralWorkspaceUtility;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.util.RuntimeEventsPublisher;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.util.UnrecoverablePodEventListenerFactory;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.wsplugins.brokerphases.BrokerEnvironmentFactory;
|
||||
|
|
@ -97,7 +96,6 @@ public class PluginBrokerManager<E extends KubernetesEnvironment> {
|
|||
RuntimeIdentity identity,
|
||||
StartSynchronizer startSynchronizer,
|
||||
Collection<PluginFQN> pluginFQNs,
|
||||
boolean isEphemeral,
|
||||
boolean mergePlugins,
|
||||
Map<String, String> startOptions)
|
||||
throws InfrastructureException {
|
||||
|
|
@ -108,9 +106,7 @@ public class PluginBrokerManager<E extends KubernetesEnvironment> {
|
|||
|
||||
E brokerEnvironment =
|
||||
brokerEnvironmentFactory.createForMetadataBroker(pluginFQNs, identity, mergePlugins);
|
||||
if (isEphemeral) {
|
||||
EphemeralWorkspaceUtility.makeEphemeral(brokerEnvironment.getAttributes());
|
||||
}
|
||||
// TODO: Potentially remove this class
|
||||
environmentProvisioner.provision(brokerEnvironment, identity);
|
||||
|
||||
ListenBrokerEvents listenBrokerEvents = getListenEventPhase(workspaceId, brokersResult);
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2021 Red Hat, Inc.
|
||||
* Copyright (c) 2012-2022 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
|
|
@ -29,7 +29,6 @@ import org.eclipse.che.api.workspace.server.wsplugins.model.PluginFQN;
|
|||
import org.eclipse.che.commons.annotation.Traced;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.StartSynchronizer;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.environment.KubernetesEnvironment;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.EphemeralWorkspaceUtility;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
|
@ -84,11 +83,10 @@ public class SidecarToolingProvisioner<E extends KubernetesEnvironment> {
|
|||
"Sidecar tooling configuration is not supported with environment type " + recipeType);
|
||||
}
|
||||
|
||||
boolean isEphemeral = EphemeralWorkspaceUtility.isEphemeral(environment.getAttributes());
|
||||
boolean mergePlugins = shouldMergePlugins(environment.getAttributes());
|
||||
List<ChePlugin> chePlugins =
|
||||
pluginBrokerManager.getTooling(
|
||||
identity, startSynchronizer, pluginFQNs, isEphemeral, mergePlugins, startOptions);
|
||||
identity, startSynchronizer, pluginFQNs, mergePlugins, startOptions);
|
||||
|
||||
pluginsApplier.apply(identity, environment, chePlugins);
|
||||
artifactsBrokerApplier.apply(environment, identity, pluginFQNs, mergePlugins);
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2021 Red Hat, Inc.
|
||||
* Copyright (c) 2012-2022 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
|
|
@ -31,7 +31,6 @@ import org.eclipse.che.api.workspace.server.wsplugins.ChePluginsApplier;
|
|||
import org.eclipse.che.api.workspace.server.wsplugins.PluginFQNParser;
|
||||
import org.eclipse.che.api.workspace.server.wsplugins.model.PluginFQN;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.environment.KubernetesEnvironment;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.EphemeralWorkspaceUtility;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.wsplugins.KubernetesArtifactsBrokerApplier;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.wsplugins.PluginBrokerManager;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.wsplugins.SidecarToolingProvisioner;
|
||||
|
|
@ -75,8 +74,6 @@ public class SidecarToolingProvisionerTest {
|
|||
|
||||
@BeforeMethod
|
||||
public void setUp() throws Exception {
|
||||
Map<String, String> ephemeralEnvAttributes = new HashMap<>(environmentAttributesBase);
|
||||
EphemeralWorkspaceUtility.makeEphemeral(ephemeralEnvAttributes);
|
||||
Map<String, String> nonEphemeralEnvAttributes = new HashMap<>(environmentAttributesBase);
|
||||
|
||||
Map<String, String> mergePluginsEnvAttributes = new HashMap<>(environmentAttributesBase);
|
||||
|
|
@ -89,7 +86,6 @@ public class SidecarToolingProvisionerTest {
|
|||
lenient().doReturn(RECIPE_TYPE).when(mergePluginsEnvironment).getType();
|
||||
lenient().doReturn(RECIPE_TYPE).when(noMergePluginsEnvironment).getType();
|
||||
lenient().doReturn(nonEphemeralEnvAttributes).when(nonEphemeralEnvironment).getAttributes();
|
||||
lenient().doReturn(ephemeralEnvAttributes).when(ephemeralEnvironment).getAttributes();
|
||||
lenient().doReturn(mergePluginsEnvAttributes).when(mergePluginsEnvironment).getAttributes();
|
||||
lenient().doReturn(noMergePluginsEnvAttributes).when(noMergePluginsEnvironment).getAttributes();
|
||||
doReturn(pluginFQNs).when(pluginFQNParser).parsePlugins(any());
|
||||
|
|
@ -118,7 +114,7 @@ public class SidecarToolingProvisionerTest {
|
|||
provisioner = getSidecarToolingProvisioner("true");
|
||||
provisioner.provision(runtimeId, startSynchronizer, nonEphemeralEnvironment, emptyMap());
|
||||
|
||||
verify(brokerManager, times(1)).getTooling(any(), any(), any(), anyBoolean(), eq(true), any());
|
||||
verify(brokerManager, times(1)).getTooling(any(), any(), any(), eq(true), any());
|
||||
verify(artifactsBrokerApplier, times(1)).apply(any(), any(), any(), eq(true));
|
||||
}
|
||||
|
||||
|
|
@ -127,7 +123,7 @@ public class SidecarToolingProvisionerTest {
|
|||
provisioner = getSidecarToolingProvisioner("false");
|
||||
provisioner.provision(runtimeId, startSynchronizer, nonEphemeralEnvironment, emptyMap());
|
||||
|
||||
verify(brokerManager, times(1)).getTooling(any(), any(), any(), anyBoolean(), eq(false), any());
|
||||
verify(brokerManager, times(1)).getTooling(any(), any(), any(), eq(false), any());
|
||||
verify(artifactsBrokerApplier, times(1)).apply(any(), any(), any(), eq(false));
|
||||
}
|
||||
|
||||
|
|
@ -136,7 +132,7 @@ public class SidecarToolingProvisionerTest {
|
|||
provisioner = getSidecarToolingProvisioner("false");
|
||||
provisioner.provision(runtimeId, startSynchronizer, mergePluginsEnvironment, emptyMap());
|
||||
|
||||
verify(brokerManager, times(1)).getTooling(any(), any(), any(), anyBoolean(), eq(true), any());
|
||||
verify(brokerManager, times(1)).getTooling(any(), any(), any(), eq(true), any());
|
||||
verify(artifactsBrokerApplier, times(1)).apply(any(), any(), any(), eq(true));
|
||||
}
|
||||
|
||||
|
|
@ -145,7 +141,7 @@ public class SidecarToolingProvisionerTest {
|
|||
provisioner = getSidecarToolingProvisioner("true");
|
||||
provisioner.provision(runtimeId, startSynchronizer, noMergePluginsEnvironment, emptyMap());
|
||||
|
||||
verify(brokerManager, times(1)).getTooling(any(), any(), any(), anyBoolean(), eq(false), any());
|
||||
verify(brokerManager, times(1)).getTooling(any(), any(), any(), eq(false), any());
|
||||
verify(artifactsBrokerApplier, times(1)).apply(any(), any(), any(), eq(false));
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,419 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2021 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
*
|
||||
* SPDX-License-Identifier: EPL-2.0
|
||||
*
|
||||
* Contributors:
|
||||
* Red Hat, Inc. - initial API and implementation
|
||||
*/
|
||||
package org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc;
|
||||
|
||||
import static com.google.common.collect.ImmutableMap.of;
|
||||
import static java.lang.Boolean.TRUE;
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.stream.Collectors.toList;
|
||||
import static org.eclipse.che.api.workspace.shared.Constants.DEBUG_WORKSPACE_START;
|
||||
import static org.eclipse.che.api.workspace.shared.Constants.DEBUG_WORKSPACE_START_LOG_LIMIT_BYTES;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.PVCSubPathHelper.JOB_MOUNT_PATH;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.PVCSubPathHelper.MKDIR_COMMAND_BASE;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.PVCSubPathHelper.POD_PHASE_FAILED;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.PVCSubPathHelper.POD_PHASE_SUCCEEDED;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.PVCSubPathHelper.RM_COMMAND_BASE;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyBoolean;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.ArgumentMatchers.nullable;
|
||||
import static org.mockito.Mockito.anyLong;
|
||||
import static org.mockito.Mockito.doAnswer;
|
||||
import static org.mockito.Mockito.doThrow;
|
||||
import static org.mockito.Mockito.lenient;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
import static org.testng.Assert.assertEquals;
|
||||
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import io.fabric8.kubernetes.api.model.Container;
|
||||
import io.fabric8.kubernetes.api.model.Event;
|
||||
import io.fabric8.kubernetes.api.model.ObjectMeta;
|
||||
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
|
||||
import io.fabric8.kubernetes.api.model.PersistentVolumeClaimStatus;
|
||||
import io.fabric8.kubernetes.api.model.Pod;
|
||||
import io.fabric8.kubernetes.api.model.PodStatus;
|
||||
import io.fabric8.kubernetes.api.model.Quantity;
|
||||
import io.fabric8.kubernetes.api.model.ResourceRequirements;
|
||||
import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder;
|
||||
import io.fabric8.kubernetes.client.Watcher;
|
||||
import io.fabric8.kubernetes.client.WatcherException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.stream.Stream;
|
||||
import org.eclipse.che.api.core.model.workspace.runtime.RuntimeIdentity;
|
||||
import org.eclipse.che.api.workspace.server.spi.InfrastructureException;
|
||||
import org.eclipse.che.commons.observability.NoopExecutorServiceWrapper;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesDeployments;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesNamespace;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesNamespaceFactory;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.KubernetesPersistentVolumeClaims;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.event.PodEvent;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.event.PodEventHandler;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.NodeSelectorProvisioner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.SecurityContextProvisioner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.TolerationsProvisioner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.util.RuntimeEventsPublisher;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
import org.mockito.Captor;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.testng.MockitoTestNGListener;
|
||||
import org.testng.annotations.BeforeMethod;
|
||||
import org.testng.annotations.Listeners;
|
||||
import org.testng.annotations.Test;
|
||||
|
||||
/**
|
||||
* Tests {@link PVCSubPathHelper}.
|
||||
*
|
||||
* @author Anton Korneta
|
||||
*/
|
||||
@Listeners(MockitoTestNGListener.class)
|
||||
public class PVCSubPathHelperTest {
|
||||
|
||||
private static final String WORKSPACE_ID = "workspace132";
|
||||
private static final String NAMESPACE = "namespace";
|
||||
private static final String PVC_NAME = "che-workspace-claim";
|
||||
private static final String PVC_PHASE_BOUND = "Bound";
|
||||
private static final String PVC_PHASE_TERMINATING = "Terminating";
|
||||
private static final String jobMemoryLimit = "250Mi";
|
||||
private static final String jobImage = "centos:centos7";
|
||||
private static final String PROJECTS_PATH = "/projects";
|
||||
private static final String M2_PATH = "/.m2";
|
||||
|
||||
@Mock private SecurityContextProvisioner securityContextProvisioner;
|
||||
@Mock private NodeSelectorProvisioner nodeSelectorProvisioner;
|
||||
@Mock private TolerationsProvisioner tolerationsProvisioner;
|
||||
@Mock private KubernetesNamespaceFactory k8sNamespaceFactory;
|
||||
@Mock private KubernetesNamespace k8sNamespace;
|
||||
@Mock private KubernetesDeployments osDeployments;
|
||||
@Mock private KubernetesPersistentVolumeClaims kubernetesPVCs;
|
||||
@Mock private PersistentVolumeClaim pvc;
|
||||
@Mock private ObjectMeta pvcMetadata;
|
||||
@Mock private PersistentVolumeClaimStatus pvcStatus;
|
||||
@Mock private Pod pod;
|
||||
@Mock private PodStatus podStatus;
|
||||
@Mock private RuntimeEventsPublisher eventsPublisher;
|
||||
@Mock private RuntimeIdentity identity;
|
||||
|
||||
@Captor private ArgumentCaptor<Pod> podCaptor;
|
||||
|
||||
private PVCSubPathHelper pvcSubPathHelper;
|
||||
|
||||
@BeforeMethod
|
||||
public void setup() throws Exception {
|
||||
pvcSubPathHelper =
|
||||
new PVCSubPathHelper(
|
||||
jobMemoryLimit,
|
||||
jobImage,
|
||||
"IfNotPresent",
|
||||
k8sNamespaceFactory,
|
||||
securityContextProvisioner,
|
||||
nodeSelectorProvisioner,
|
||||
tolerationsProvisioner,
|
||||
new NoopExecutorServiceWrapper(),
|
||||
eventsPublisher);
|
||||
lenient().when(identity.getInfrastructureNamespace()).thenReturn(NAMESPACE);
|
||||
lenient().when(k8sNamespaceFactory.access(WORKSPACE_ID, NAMESPACE)).thenReturn(k8sNamespace);
|
||||
lenient().when(k8sNamespace.deployments()).thenReturn(osDeployments);
|
||||
lenient().when(k8sNamespace.persistentVolumeClaims()).thenReturn(kubernetesPVCs);
|
||||
lenient().when(kubernetesPVCs.get()).thenReturn(Arrays.asList(pvc));
|
||||
lenient().when(pvc.getMetadata()).thenReturn(pvcMetadata);
|
||||
lenient().when(pvcMetadata.getName()).thenReturn(PVC_NAME);
|
||||
lenient().when(pvc.getStatus()).thenReturn(pvcStatus);
|
||||
lenient().when(pvcStatus.getPhase()).thenReturn(PVC_PHASE_BOUND);
|
||||
lenient().when(pod.getStatus()).thenReturn(podStatus);
|
||||
lenient().when(osDeployments.deploy(nullable(Pod.class))).thenReturn(pod);
|
||||
lenient()
|
||||
.when(osDeployments.waitAsync(anyString(), any()))
|
||||
.thenReturn(CompletableFuture.completedFuture(pod));
|
||||
lenient().doNothing().when(osDeployments).delete(anyString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBuildsCommandByGivenBaseAndPaths() throws Exception {
|
||||
final String[] paths = {WORKSPACE_ID + PROJECTS_PATH, WORKSPACE_ID + M2_PATH};
|
||||
|
||||
final String[] actual = pvcSubPathHelper.buildCommand(MKDIR_COMMAND_BASE, paths);
|
||||
|
||||
final String[] expected = new String[MKDIR_COMMAND_BASE.length + 2];
|
||||
System.arraycopy(MKDIR_COMMAND_BASE, 0, expected, 0, MKDIR_COMMAND_BASE.length);
|
||||
expected[expected.length - 1] = JOB_MOUNT_PATH + '/' + WORKSPACE_ID + M2_PATH;
|
||||
expected[expected.length - 2] = JOB_MOUNT_PATH + '/' + WORKSPACE_ID + PROJECTS_PATH;
|
||||
assertEquals(actual, expected);
|
||||
}
|
||||
|
||||
/**
 * Happy path of {@code createDirs}: the maintenance pod is created with the mkdir
 * command and the default "IfNotPresent" pull policy, awaited, then deleted, and all
 * pod provisioners are applied.
 */
@Test
public void testSuccessfullyCreatesWorkspaceDirs() throws Exception {
  when(podStatus.getPhase()).thenReturn(POD_PHASE_SUCCEEDED);

  pvcSubPathHelper.createDirs(
      identity, WORKSPACE_ID, PVC_NAME, emptyMap(), WORKSPACE_ID + PROJECTS_PATH);

  verify(osDeployments).create(podCaptor.capture());
  final List<String> actual = podCaptor.getValue().getSpec().getContainers().get(0).getCommand();

  // Every container of the job pod must use the pull policy passed to the constructor.
  for (Container container : podCaptor.getValue().getSpec().getContainers()) {
    assertEquals(container.getImagePullPolicy(), "IfNotPresent");
  }
  // Expected command: mkdir base + the single path rooted at the job mount path.
  final List<String> expected =
      Stream.concat(
              Arrays.stream(MKDIR_COMMAND_BASE),
              Stream.of(JOB_MOUNT_PATH + '/' + WORKSPACE_ID + PROJECTS_PATH))
          .collect(toList());
  assertEquals(actual, expected);
  // Job lifecycle: wait for completion, check phase, clean the pod up.
  verify(osDeployments).waitAsync(anyString(), any());
  verify(podStatus).getPhase();
  verify(osDeployments).delete(anyString());
  verify(securityContextProvisioner).provision(any());
  verify(nodeSelectorProvisioner).provision(any());
  verify(tolerationsProvisioner).provision(any());
}
||||
|
||||
/**
 * When the start options enable workspace-start debugging, {@code createDirs} must
 * watch the job pod's logs with the configured byte limit (123 here).
 */
@Test
public void testWatchLogsWhenCreatingWorkspaceDirs() throws InfrastructureException {
  when(podStatus.getPhase()).thenReturn(POD_PHASE_SUCCEEDED);

  pvcSubPathHelper.createDirs(
      identity,
      WORKSPACE_ID,
      PVC_NAME,
      ImmutableMap.of(
          DEBUG_WORKSPACE_START, TRUE.toString(), DEBUG_WORKSPACE_START_LOG_LIMIT_BYTES, "123"),
      WORKSPACE_ID + PROJECTS_PATH);

  // The limit string "123" must arrive as the numeric log limit.
  verify(osDeployments).watchLogs(any(), any(), any(), any(), eq(123L));
}
||||
|
||||
/**
 * {@code createDirs} never registers a failure-event watcher, even when debug log
 * watching is enabled — event watching is reserved for the removal path.
 */
@Test
public void testDoNotWatchFailureEventsWhenCreatingWorkspaceDirs()
    throws InfrastructureException {
  when(podStatus.getPhase()).thenReturn(POD_PHASE_SUCCEEDED);

  pvcSubPathHelper.createDirs(
      identity,
      WORKSPACE_ID,
      PVC_NAME,
      ImmutableMap.of(
          DEBUG_WORKSPACE_START, TRUE.toString(), DEBUG_WORKSPACE_START_LOG_LIMIT_BYTES, "123"),
      WORKSPACE_ID + PROJECTS_PATH);

  verify(osDeployments, never()).watchEvents(any(PodEventHandler.class));
}
||||
|
||||
/**
 * The job container must carry the configured memory amount as BOTH the resource
 * limit and the resource request.
 */
@Test
public void testSetMemoryLimitAndRequest() throws Exception {
  when(podStatus.getPhase()).thenReturn(POD_PHASE_SUCCEEDED);

  pvcSubPathHelper.createDirs(
      identity, WORKSPACE_ID, PVC_NAME, emptyMap(), WORKSPACE_ID + PROJECTS_PATH);

  verify(osDeployments).create(podCaptor.capture());
  ResourceRequirements actual =
      podCaptor.getValue().getSpec().getContainers().get(0).getResources();
  ResourceRequirements expected =
      new ResourceRequirementsBuilder()
          .addToLimits(of("memory", new Quantity(jobMemoryLimit)))
          .addToRequests(of("memory", new Quantity(jobMemoryLimit)))
          .build();
  assertEquals(actual, expected);
  // Standard job lifecycle and provisioning must still happen.
  verify(osDeployments).waitAsync(anyString(), any());
  verify(podStatus).getPhase();
  verify(osDeployments).delete(anyString());
  verify(securityContextProvisioner).provision(any());
  verify(nodeSelectorProvisioner).provision(any());
  verify(tolerationsProvisioner).provision(any());
}
||||
|
||||
/**
 * When the job pod finishes in the Failed phase, the helper fetches its logs for
 * diagnostics and still deletes the pod; no exception propagates.
 */
@Test
public void testLogErrorWhenJobExecutionFailed() throws Exception {
  when(podStatus.getPhase()).thenReturn(POD_PHASE_FAILED);

  pvcSubPathHelper.execute(
      WORKSPACE_ID, NAMESPACE, PVC_NAME, MKDIR_COMMAND_BASE, WORKSPACE_ID + PROJECTS_PATH);

  verify(osDeployments).create(any());
  verify(osDeployments).waitAsync(anyString(), any());
  verify(podStatus).getPhase();
  // Logs are pulled only on failure, then the pod is cleaned up regardless.
  verify(osDeployments).getPodLogs(any());
  verify(osDeployments).delete(anyString());
}

/**
 * A failure while creating the pod (namespace access problem) is swallowed and
 * logged; the helper never proceeds to wait for the pod.
 */
@Test
public void testLogErrorWhenKubernetesProjectCreationFailed() throws Exception {
  when(osDeployments.create(any()))
      .thenThrow(new InfrastructureException("Kubernetes namespace creation failed"));

  pvcSubPathHelper.execute(
      WORKSPACE_ID, NAMESPACE, PVC_NAME, MKDIR_COMMAND_BASE, WORKSPACE_ID + PROJECTS_PATH);

  verify(k8sNamespaceFactory).access(WORKSPACE_ID, NAMESPACE);
  verify(osDeployments).create(any());
  verify(osDeployments, never()).waitAsync(anyString(), any());
}

/**
 * Same contract for a pod-creation failure: the error is handled internally and no
 * wait is attempted.
 */
@Test
public void testLogErrorWhenKubernetesPodCreationFailed() throws Exception {
  when(osDeployments.create(any()))
      .thenThrow(new InfrastructureException("Kubernetes pod creation failed"));

  pvcSubPathHelper.execute(
      WORKSPACE_ID, NAMESPACE, PVC_NAME, MKDIR_COMMAND_BASE, WORKSPACE_ID + PROJECTS_PATH);

  verify(k8sNamespaceFactory).access(WORKSPACE_ID, NAMESPACE);
  verify(k8sNamespace).deployments();
  verify(osDeployments).create(any());
  verify(osDeployments, never()).waitAsync(anyString(), any());
}
||||
|
||||
/**
 * Failures while deleting the finished job pod are ignored — the normal lifecycle
 * (create, wait, phase check, delete attempt) still completes without propagating.
 */
@Test
public void testIgnoreExceptionWhenPodJobRemovalFailed() throws Exception {
  when(podStatus.getPhase()).thenReturn(POD_PHASE_SUCCEEDED);
  doThrow(InfrastructureException.class).when(osDeployments).delete(anyString());

  pvcSubPathHelper.execute(
      WORKSPACE_ID, NAMESPACE, PVC_NAME, MKDIR_COMMAND_BASE, WORKSPACE_ID + PROJECTS_PATH);

  verify(osDeployments).create(any());
  verify(osDeployments).waitAsync(anyString(), any());
  verify(podStatus).getPhase();
  verify(osDeployments).delete(anyString());
}
||||
|
||||
/**
 * The pull policy given to the constructor is applied verbatim to every container of
 * the job pod (deliberately nonsensical value to prove it is not hard-coded).
 */
@Test
public void shouldBeAbleToConfigureImagePullPolicy() throws InfrastructureException {
  // given: helper rebuilt with a custom pull policy
  pvcSubPathHelper =
      new PVCSubPathHelper(
          jobMemoryLimit,
          jobImage,
          "ToBeOrNotIfPresent",
          k8sNamespaceFactory,
          securityContextProvisioner,
          nodeSelectorProvisioner,
          tolerationsProvisioner,
          new NoopExecutorServiceWrapper(),
          eventsPublisher);
  // when
  pvcSubPathHelper.execute(
      WORKSPACE_ID, NAMESPACE, PVC_NAME, MKDIR_COMMAND_BASE, WORKSPACE_ID + PROJECTS_PATH);

  // then
  verify(osDeployments).create(podCaptor.capture());
  for (Container container : podCaptor.getValue().getSpec().getContainers()) {
    assertEquals(container.getImagePullPolicy(), "ToBeOrNotIfPresent");
  }
}
||||
|
||||
/**
 * When a failure-related pod event (Failed / FailedScheduling / FailedMount) reaches
 * the registered handler, the pending async wait on the job pod must be cancelled —
 * once per failure event.
 */
@Test
public void testCancelAsyncWaitWhenFailureEventReceived()
    throws InfrastructureException, ExecutionException, InterruptedException, TimeoutException {
  // given: a mock future so cancellation can be verified; get() still yields the pod
  CompletableFuture<Pod> futureToCancel = (CompletableFuture<Pod>) mock(CompletableFuture.class);
  when(osDeployments.waitAsync(anyString(), any())).thenReturn(futureToCancel);
  when(futureToCancel.get(anyLong(), any(TimeUnit.class))).thenReturn(pod);

  // Handlers registered by the helper are collected here; the watcher below fans
  // incoming Kubernetes events out to them as PodEvents for the job pod.
  List<PodEventHandler> containerEventsHandlers = new ArrayList<>();
  Watcher<Event> watcher =
      new Watcher<>() {
        @Override
        public boolean reconnecting() {
          return Watcher.super.reconnecting();
        }

        @Override
        public void eventReceived(Action action, Event event) {
          containerEventsHandlers.forEach(
              h ->
                  h.handle(
                      new PodEvent(
                          // Pod name must match the rm job's naming scheme.
                          RM_COMMAND_BASE[0] + "-" + WORKSPACE_ID,
                          "containerName",
                          event.getReason(),
                          "message",
                          "creationTimestamp",
                          "lastTimestamp")));
        }

        @Override
        public void onClose() {
          Watcher.super.onClose();
        }

        @Override
        public void onClose(WatcherException e) {}
      };

  // Capture the handler the helper registers via watchEvents.
  doAnswer(invocation -> containerEventsHandlers.add(invocation.getArgument(0)))
      .when(osDeployments)
      .watchEvents(any(PodEventHandler.class));

  // when: run the removal job (NoopExecutorServiceWrapper makes this synchronous)
  pvcSubPathHelper
      .removeDirsAsync(WORKSPACE_ID, NAMESPACE, PVC_NAME, WORKSPACE_ID + PROJECTS_PATH)
      .get();
  // simulate failure events
  watcher.eventReceived(Watcher.Action.ADDED, newEvent("Failed"));
  watcher.eventReceived(Watcher.Action.ADDED, newEvent("FailedScheduling"));
  watcher.eventReceived(Watcher.Action.ADDED, newEvent("FailedMount"));

  // then: each failure event triggers a cancellation of the wait
  verify(futureToCancel, times(3)).cancel(anyBoolean());
}
||||
|
||||
/** With the watch-failure-events flag set, {@code execute} registers an event watcher. */
@Test
public void testWatchFailureEvents() throws InfrastructureException {
  pvcSubPathHelper.execute(
      WORKSPACE_ID, NAMESPACE, PVC_NAME, MKDIR_COMMAND_BASE, true, WORKSPACE_ID + PROJECTS_PATH);

  verify(osDeployments).watchEvents(any(PodEventHandler.class));
}

/** Without the flag (default overload), no event watcher is registered. */
@Test
public void testDoNotWatchFailureEvents() throws InfrastructureException {
  pvcSubPathHelper.execute(
      WORKSPACE_ID, NAMESPACE, PVC_NAME, MKDIR_COMMAND_BASE, WORKSPACE_ID + PROJECTS_PATH);

  verify(osDeployments, never()).watchEvents(any(PodEventHandler.class));
}

/** No maintenance pod is created when the target PVC does not exist in the namespace. */
@Test
public void testDoNotCreatePodWhenPVCDoesNotExist() throws InfrastructureException {
  when(kubernetesPVCs.get()).thenReturn(Collections.emptyList());
  pvcSubPathHelper.execute(
      WORKSPACE_ID, NAMESPACE, PVC_NAME, MKDIR_COMMAND_BASE, WORKSPACE_ID + PROJECTS_PATH);
  verify(osDeployments, never()).create(any());
}

/** No maintenance pod is created when the target PVC is being terminated. */
@Test
public void testDoNotCreatePodWhenPVCIsTerminating() throws InfrastructureException {
  when(pvcStatus.getPhase()).thenReturn(PVC_PHASE_TERMINATING);
  pvcSubPathHelper.execute(
      WORKSPACE_ID, NAMESPACE, PVC_NAME, MKDIR_COMMAND_BASE, WORKSPACE_ID + PROJECTS_PATH);
  verify(osDeployments, never()).create(any());
}
||||
|
||||
/** Builds a minimal Kubernetes event carrying only the given failure reason. */
private static Event newEvent(String reason) {
  final Event result = new Event();
  result.setReason(reason);
  return result;
}
}
|
||||
|
|
@ -1,218 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2021 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
*
|
||||
* SPDX-License-Identifier: EPL-2.0
|
||||
*
|
||||
* Contributors:
|
||||
* Red Hat, Inc. - initial API and implementation
|
||||
*/
|
||||
package org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc;
|
||||
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.TestObjects.newContainer;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.TestObjects.newPod;
|
||||
import static org.testng.Assert.assertEquals;
|
||||
import static org.testng.Assert.assertNotNull;
|
||||
import static org.testng.Assert.assertNull;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import io.fabric8.kubernetes.api.model.ConfigMapVolumeSourceBuilder;
|
||||
import io.fabric8.kubernetes.api.model.ContainerBuilder;
|
||||
import io.fabric8.kubernetes.api.model.PersistentVolumeClaimVolumeSourceBuilder;
|
||||
import io.fabric8.kubernetes.api.model.Pod;
|
||||
import io.fabric8.kubernetes.api.model.SecretVolumeSourceBuilder;
|
||||
import io.fabric8.kubernetes.api.model.Volume;
|
||||
import io.fabric8.kubernetes.api.model.VolumeBuilder;
|
||||
import io.fabric8.kubernetes.api.model.VolumeMountBuilder;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.environment.KubernetesEnvironment.PodData;
|
||||
import org.testng.annotations.BeforeMethod;
|
||||
import org.testng.annotations.Test;
|
||||
|
||||
/** @author Sergii Leshchenko */
|
||||
public class PodsVolumesTest {
  private static final String POD_1_NAME = "main";
  private static final String CONTAINER_1_NAME = "app";
  private static final String CONTAINER_2_NAME = "db";

  // Spec+metadata view of the two-container test pod, rebuilt before every test.
  private PodData podData;

  // Instance under test (stateless utility).
  private PodsVolumes podsVolumes;

  @BeforeMethod
  public void setUp() {
    Pod pod =
        newPod(POD_1_NAME)
            .withContainers(
                newContainer(CONTAINER_1_NAME).build(), newContainer(CONTAINER_2_NAME).build())
            .build();
    podData = new PodData(pod.getSpec(), pod.getMetadata());

    podsVolumes = new PodsVolumes();
  }

  /** A volume whose PVC claim name matches the given name gets repointed to the new PVC. */
  @Test
  public void shouldChangePVCReference() {
    // given: a volume referencing the "userData" claim
    podData
        .getSpec()
        .getVolumes()
        .add(
            new VolumeBuilder()
                .withName("userData")
                .withPersistentVolumeClaim(
                    new PersistentVolumeClaimVolumeSourceBuilder()
                        .withClaimName("userData")
                        .build())
                .build());

    // when
    podsVolumes.changePVCReferences(ImmutableList.of(podData), "userData", "newPVCName");

    // then
    assertEquals(podData.getSpec().getVolumes().size(), 1);
    Volume volume = podData.getSpec().getVolumes().get(0);
    assertEquals(volume.getPersistentVolumeClaim().getClaimName(), "newPVCName");
  }

  /** Matching is by claim name, not volume name: a non-matching claim is left untouched. */
  @Test
  public void shouldNotChangeNonMatchingVolumesChangePVCReference() {
    // given: volume named "userData" but claiming a different PVC
    podData
        .getSpec()
        .getVolumes()
        .add(
            new VolumeBuilder()
                .withName("userData")
                .withPersistentVolumeClaim(
                    new PersistentVolumeClaimVolumeSourceBuilder()
                        .withClaimName("nonMatching")
                        .build())
                .build());

    // when
    podsVolumes.changePVCReferences(ImmutableList.of(podData), "userData", "newPVCName");

    // then: claim name is unchanged
    assertEquals(podData.getSpec().getVolumes().size(), 1);
    Volume volume = podData.getSpec().getVolumes().get(0);
    assertEquals(volume.getPersistentVolumeClaim().getClaimName(), "nonMatching");
  }

  /**
   * All PVC-backed volumes collapse into a single common-PVC volume, and the volume
   * mounts of both init and regular containers are renamed to point at it.
   */
  @Test
  public void shouldReplaceVolumesWithCommon() {
    // given: an init container mounting "initData" ...
    podData
        .getSpec()
        .getInitContainers()
        .add(
            new ContainerBuilder()
                .withName("userInitContainer")
                .withVolumeMounts(
                    new VolumeMountBuilder()
                        .withName("initData")
                        .withSubPath("/tmp/init/userData")
                        .build())
                .build());

    // ... a regular container mounting "userData" ...
    podData
        .getSpec()
        .getContainers()
        .get(0)
        .getVolumeMounts()
        .add(new VolumeMountBuilder().withName("userData").withSubPath("/home/user/data").build());

    // ... and two distinct PVC-backed volumes behind those mounts
    podData
        .getSpec()
        .getVolumes()
        .add(
            new VolumeBuilder()
                .withName("userData")
                .withPersistentVolumeClaim(
                    new PersistentVolumeClaimVolumeSourceBuilder()
                        .withClaimName("userDataPVC")
                        .build())
                .build());
    podData
        .getSpec()
        .getVolumes()
        .add(
            new VolumeBuilder()
                .withName("initData")
                .withPersistentVolumeClaim(
                    new PersistentVolumeClaimVolumeSourceBuilder()
                        .withClaimName("initDataPVC")
                        .build())
                .build());

    // when
    podsVolumes.replacePVCVolumesWithCommon(ImmutableMap.of("pod", podData), "commonPVC");

    // then: one volume remains, claiming the common PVC, and all mounts follow it
    assertEquals(podData.getSpec().getVolumes().size(), 1);
    assertEquals(
        podData.getSpec().getVolumes().get(0).getPersistentVolumeClaim().getClaimName(),
        "commonPVC");
    assertEquals(
        podData.getSpec().getInitContainers().get(0).getVolumeMounts().get(0).getName(),
        "commonPVC");
    assertEquals(
        podData.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), "commonPVC");
  }

  /** ConfigMap- and Secret-backed volumes (and their mounts) are never rewritten. */
  @Test
  public void shouldNotReplaceNonPVCVolumes() {
    // given: containers mounting a config map and a secret
    podData
        .getSpec()
        .getInitContainers()
        .add(
            new ContainerBuilder()
                .withName("userInitContainer")
                .withVolumeMounts(new VolumeMountBuilder().withName("configMap").build())
                .build());

    podData
        .getSpec()
        .getContainers()
        .get(0)
        .getVolumeMounts()
        .add(new VolumeMountBuilder().withName("secret").withSubPath("/home/user/data").build());

    podData
        .getSpec()
        .getVolumes()
        .add(
            new VolumeBuilder()
                .withName("configMap")
                .withConfigMap(new ConfigMapVolumeSourceBuilder().withName("configMap").build())
                .build());
    podData
        .getSpec()
        .getVolumes()
        .add(
            new VolumeBuilder()
                .withName("secret")
                .withSecret(new SecretVolumeSourceBuilder().withSecretName("secret").build())
                .build());

    // when
    podsVolumes.replacePVCVolumesWithCommon(ImmutableMap.of("pod", podData), "commonPVC");

    // then: both volumes survive unchanged and no PVC source was introduced
    assertEquals(podData.getSpec().getVolumes().size(), 2);
    assertNotNull(podData.getSpec().getVolumes().get(0).getConfigMap());
    assertNull(podData.getSpec().getVolumes().get(0).getPersistentVolumeClaim());

    assertNotNull(podData.getSpec().getVolumes().get(1).getSecret());
    assertNull(podData.getSpec().getVolumes().get(1).getPersistentVolumeClaim());

    assertEquals(
        podData.getSpec().getInitContainers().get(0).getVolumeMounts().get(0).getName(),
        "configMap");
    assertEquals(
        podData.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), "secret");
  }
}
|
||||
|
|
@ -1,207 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2021 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
*
|
||||
* SPDX-License-Identifier: EPL-2.0
|
||||
*
|
||||
* Contributors:
|
||||
* Red Hat, Inc. - initial API and implementation
|
||||
*/
|
||||
package org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc;
|
||||
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.Constants.CHE_VOLUME_NAME_LABEL;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.TestObjects.newContainer;
|
||||
import static org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.TestObjects.newPod;
|
||||
import static org.testng.Assert.assertEquals;
|
||||
|
||||
import io.fabric8.kubernetes.api.model.ConfigMapVolumeSourceBuilder;
|
||||
import io.fabric8.kubernetes.api.model.Container;
|
||||
import io.fabric8.kubernetes.api.model.ContainerBuilder;
|
||||
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
|
||||
import io.fabric8.kubernetes.api.model.PersistentVolumeClaimBuilder;
|
||||
import io.fabric8.kubernetes.api.model.PersistentVolumeClaimVolumeSourceBuilder;
|
||||
import io.fabric8.kubernetes.api.model.Pod;
|
||||
import io.fabric8.kubernetes.api.model.PodSpec;
|
||||
import io.fabric8.kubernetes.api.model.Volume;
|
||||
import io.fabric8.kubernetes.api.model.VolumeBuilder;
|
||||
import io.fabric8.kubernetes.api.model.VolumeMount;
|
||||
import io.fabric8.kubernetes.api.model.VolumeMountBuilder;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.environment.KubernetesEnvironment;
|
||||
import org.testng.annotations.BeforeMethod;
|
||||
import org.testng.annotations.Test;
|
||||
|
||||
/** @author Sergii Leshchenko */
|
||||
public class SubPathPrefixesTest {

  private static final String USER_DATA_PVC_NAME = "userDataPVC";
  private static final String WORKSPACE_ID = "workspace123";

  private static final String POD_1_NAME = "main";
  private static final String CONTAINER_1_NAME = "app";
  private static final String CONTAINER_2_NAME = "db";

  // Two-container pod added to the environment; its volumes/mounts are set up below.
  private Pod pod;

  // PVC registered in the environment under USER_DATA_PVC_NAME.
  private PersistentVolumeClaim pvc;

  private KubernetesEnvironment k8sEnv;

  // Instance under test.
  private SubPathPrefixes subpathPrefixes;

  @BeforeMethod
  public void setup() throws Exception {
    subpathPrefixes = new SubPathPrefixes();

    k8sEnv = KubernetesEnvironment.builder().build();

    pod =
        newPod(POD_1_NAME)
            .withContainers(
                newContainer(CONTAINER_1_NAME).build(), newContainer(CONTAINER_2_NAME).build())
            .build();

    k8sEnv.addPod(pod);

    pvc = newPVC(USER_DATA_PVC_NAME);
    k8sEnv.getPersistentVolumeClaims().put(USER_DATA_PVC_NAME, pvc);

    // Init container mounting the user-data volume with its own sub-path.
    pod.getSpec()
        .getInitContainers()
        .add(
            new ContainerBuilder()
                .withName("userInitContainer")
                .withVolumeMounts(
                    new VolumeMountBuilder()
                        .withName("userData")
                        .withSubPath("/tmp/init/userData")
                        .build())
                .build());

    // First regular container also mounts the same volume.
    pod.getSpec()
        .getContainers()
        .get(0)
        .getVolumeMounts()
        .add(new VolumeMountBuilder().withName("userData").withSubPath("/home/user/data").build());

    // The pod volume backing both mounts, claiming the registered PVC.
    pod.getSpec()
        .getVolumes()
        .add(
            new VolumeBuilder()
                .withName("userData")
                .withPersistentVolumeClaim(
                    new PersistentVolumeClaimVolumeSourceBuilder()
                        .withClaimName(USER_DATA_PVC_NAME)
                        .build())
                .build());
  }

  /**
   * Without a volume-name label on the PVC, sub-paths are prefixed with
   * {workspaceId}/{pvcName} and mounts keep referencing the pod volume.
   */
  @Test
  public void shouldPrefixVolumeMountsSubpathsAndUsePvcNameAsVolumeName() {
    // when
    subpathPrefixes.prefixVolumeMountsSubpaths(k8sEnv, WORKSPACE_ID);

    // then
    PodSpec podSpec = k8sEnv.getPodsData().get(POD_1_NAME).getSpec();

    io.fabric8.kubernetes.api.model.Volume userPodVolume = podSpec.getVolumes().get(0);
    assertEquals(userPodVolume.getPersistentVolumeClaim().getClaimName(), USER_DATA_PVC_NAME);
    // NOTE(review): this assertion repeats the previous one on the same volume —
    // looks redundant; consider asserting the volume name instead.
    assertEquals(
        podSpec.getVolumes().get(0).getPersistentVolumeClaim().getClaimName(), USER_DATA_PVC_NAME);

    Container initContainer = podSpec.getInitContainers().get(0);
    VolumeMount initVolumeMount = initContainer.getVolumeMounts().get(0);
    assertEquals(
        initVolumeMount.getSubPath(),
        WORKSPACE_ID + "/" + USER_DATA_PVC_NAME + "/tmp/init/userData");
    assertEquals(initVolumeMount.getName(), userPodVolume.getName());

    Container container = podSpec.getContainers().get(0);
    VolumeMount volumeMount = container.getVolumeMounts().get(0);
    assertEquals(
        volumeMount.getSubPath(), WORKSPACE_ID + "/" + USER_DATA_PVC_NAME + "/home/user/data");
    assertEquals(volumeMount.getName(), userPodVolume.getName());
  }

  /** Volumes not backed by a PVC (here: a config map) keep their original sub-paths. */
  @Test
  public void shouldNotPrefixNotPVCSourcesVolumes() {
    // given: turn the pod volume into a config-map volume
    Volume podVolume = pod.getSpec().getVolumes().get(0);
    podVolume.setPersistentVolumeClaim(null);
    podVolume.setConfigMap(new ConfigMapVolumeSourceBuilder().withName("configMap").build());

    // when
    subpathPrefixes.prefixVolumeMountsSubpaths(k8sEnv, WORKSPACE_ID);

    // then: sub-paths are untouched
    PodSpec podSpec = k8sEnv.getPodsData().get(POD_1_NAME).getSpec();

    io.fabric8.kubernetes.api.model.Volume podDataVolume = podSpec.getVolumes().get(0);

    Container initContainer = podSpec.getInitContainers().get(0);
    VolumeMount initVolumeMount = initContainer.getVolumeMounts().get(0);
    assertEquals(initVolumeMount.getSubPath(), "/tmp/init/userData");
    assertEquals(initVolumeMount.getName(), podDataVolume.getName());

    Container container = podSpec.getContainers().get(0);
    VolumeMount volumeMount = container.getVolumeMounts().get(0);
    assertEquals(volumeMount.getSubPath(), "/home/user/data");
    assertEquals(volumeMount.getName(), podDataVolume.getName());
  }

  /**
   * When the PVC carries a che volume-name label, that label value (not the PVC name)
   * is used in the sub-path prefix: {workspaceId}/{volumeName}/...
   */
  @Test
  public void shouldPrefixVolumeMountsSubpathsAndUseVolumeNameStoredInLabels() {
    // given
    String volumeName = "userDataVolume";
    pvc.getMetadata().getLabels().put(CHE_VOLUME_NAME_LABEL, volumeName);

    // when
    subpathPrefixes.prefixVolumeMountsSubpaths(k8sEnv, WORKSPACE_ID);

    // then
    PodSpec podSpec = k8sEnv.getPodsData().get(POD_1_NAME).getSpec();

    io.fabric8.kubernetes.api.model.Volume userPodVolume = podSpec.getVolumes().get(0);
    assertEquals(userPodVolume.getPersistentVolumeClaim().getClaimName(), USER_DATA_PVC_NAME);
    // NOTE(review): duplicate of the assertion above on the same volume.
    assertEquals(
        podSpec.getVolumes().get(0).getPersistentVolumeClaim().getClaimName(), USER_DATA_PVC_NAME);

    Container initContainer = podSpec.getInitContainers().get(0);
    VolumeMount initVolumeMount = initContainer.getVolumeMounts().get(0);
    assertEquals(
        initVolumeMount.getSubPath(), WORKSPACE_ID + "/" + volumeName + "/tmp/init/userData");
    assertEquals(initVolumeMount.getName(), userPodVolume.getName());

    Container container = podSpec.getContainers().get(0);
    VolumeMount volumeMount = container.getVolumeMounts().get(0);
    assertEquals(volumeMount.getSubPath(), WORKSPACE_ID + "/" + volumeName + "/home/user/data");
    assertEquals(volumeMount.getName(), userPodVolume.getName());
  }

  /** The per-workspace sub-path is just the workspace id. */
  @Test
  public void shouldReturnWorkspaceIdAsSubpathForWorkspace() {
    // when
    String workspaceSubPath = subpathPrefixes.getWorkspaceSubPath(WORKSPACE_ID);

    // then
    assertEquals(workspaceSubPath, WORKSPACE_ID);
  }

  /** Creates a PVC with the given name and no labels. */
  private static PersistentVolumeClaim newPVC(String name) {
    return newPVC(name, new HashMap<>());
  }

  /** Creates a PVC with the given name and labels and an empty spec. */
  private static PersistentVolumeClaim newPVC(String name, Map<String, String> labels) {
    return new PersistentVolumeClaimBuilder()
        .withNewMetadata()
        .withName(name)
        .withLabels(labels)
        .endMetadata()
        .withNewSpec()
        .endSpec()
        .build();
  }
}
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2022 Red Hat, Inc.
|
||||
* This program and the accompanying materials are made
|
||||
* available under the terms of the Eclipse Public License 2.0
|
||||
* which is available at https://www.eclipse.org/legal/epl-2.0/
|
||||
*
|
||||
* SPDX-License-Identifier: EPL-2.0
|
||||
*
|
||||
* Contributors:
|
||||
* Red Hat, Inc. - initial API and implementation
|
||||
*/
|
||||
package org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc;
|
||||
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.spy;
|
||||
import static org.mockito.Mockito.verify;
|
||||
|
||||
import org.eclipse.che.api.core.model.workspace.Workspace;
|
||||
import org.eclipse.che.api.core.notification.EventService;
|
||||
import org.eclipse.che.api.workspace.shared.event.WorkspaceRemovedEvent;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.testng.MockitoTestNGListener;
|
||||
import org.testng.annotations.BeforeMethod;
|
||||
import org.testng.annotations.Listeners;
|
||||
import org.testng.annotations.Test;
|
||||
|
||||
/**
|
||||
* Tests {@link WorkspacePVCCleaner}.
|
||||
*
|
||||
* @author Anton Korneta
|
||||
*/
|
||||
@Listeners(MockitoTestNGListener.class)
public class WorkspacePVCCleanerTest {

  @Mock private WorkspaceVolumesStrategy pvcStrategy;
  // Real EventService wrapped in a spy so subscribe() calls can be verified.
  private EventService eventService;
  @Mock private Workspace workspace;
  @Mock WorkspaceRemovedEvent event;

  private WorkspacePVCCleaner workspacePVCCleaner;

  @BeforeMethod
  public void setUp() throws Exception {
    workspacePVCCleaner = new WorkspacePVCCleaner(pvcStrategy);
    eventService = spy(new EventService());
  }

  // NOTE(review): the name suggests a "PVC disabled" configuration, but the cleaner
  // is constructed the same way as in setUp() — presumably subscribe() is expected
  // to be a no-op for this strategy; confirm against WorkspacePVCCleaner.
  @Test
  public void testDoNotSubscribesCleanerWhenPVCDisabled() throws Exception {
    workspacePVCCleaner = spy(new WorkspacePVCCleaner(pvcStrategy));

    workspacePVCCleaner.subscribe(eventService);

    verify(eventService, never()).subscribe(any(), eq(WorkspaceRemovedEvent.class));
  }
}
|
||||
|
|
@ -18,7 +18,6 @@ import org.eclipse.che.api.workspace.server.spi.InfrastructureException;
|
|||
import org.eclipse.che.commons.annotation.Traced;
|
||||
import org.eclipse.che.commons.tracing.TracingTags;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.KubernetesEnvironmentProvisioner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.WorkspaceVolumesStrategy;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.CertificateProvisioner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.DeploymentMetadataProvisioner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.GatewayRouterProvisioner;
|
||||
|
|
@ -60,8 +59,6 @@ public class OpenShiftEnvironmentProvisioner
|
|||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(OpenShiftEnvironmentProvisioner.class);
|
||||
|
||||
private final boolean pvcEnabled;
|
||||
private final WorkspaceVolumesStrategy volumesStrategy;
|
||||
private final UniqueNamesProvisioner<OpenShiftEnvironment> uniqueNamesProvisioner;
|
||||
private final TlsProvisioner<OpenShiftEnvironment> routeTlsProvisioner;
|
||||
private final ServersConverter<OpenShiftEnvironment> serversConverter;
|
||||
|
|
@ -91,7 +88,6 @@ public class OpenShiftEnvironmentProvisioner
|
|||
ServersConverter<OpenShiftEnvironment> serversConverter,
|
||||
EnvVarsConverter envVarsConverter,
|
||||
RestartPolicyRewriter restartPolicyRewriter,
|
||||
WorkspaceVolumesStrategy volumesStrategy,
|
||||
ContainerResourceProvisioner resourceLimitRequestProvisioner,
|
||||
LogsVolumeMachineProvisioner logsVolumeMachineProvisioner,
|
||||
PodTerminationGracePeriodProvisioner podTerminationGracePeriodProvisioner,
|
||||
|
|
@ -108,8 +104,6 @@ public class OpenShiftEnvironmentProvisioner
|
|||
GatewayRouterProvisioner gatewayRouterProvisioner,
|
||||
DeploymentMetadataProvisioner deploymentMetadataProvisioner,
|
||||
OpenshiftTrustedCAProvisioner trustedCAProvisioner) {
|
||||
this.pvcEnabled = false;
|
||||
this.volumesStrategy = volumesStrategy;
|
||||
this.uniqueNamesProvisioner = uniqueNamesProvisioner;
|
||||
this.routeTlsProvisioner = routeTlsProvisionerProvider.get();
|
||||
this.serversConverter = serversConverter;
|
||||
|
|
@ -141,21 +135,13 @@ public class OpenShiftEnvironmentProvisioner
|
|||
|
||||
LOG.debug(
|
||||
"Start provisioning OpenShift environment for workspace '{}'", identity.getWorkspaceId());
|
||||
// 1 stage - update environment according Infrastructure specific
|
||||
if (pvcEnabled) {
|
||||
// TODO: Remove things related to pvcEnabled boolean
|
||||
logsVolumeMachineProvisioner.provision(osEnv, identity);
|
||||
}
|
||||
|
||||
// 2 stage - converting Che model env to OpenShift env
|
||||
// 1st stage - converting Che model env to OpenShift env
|
||||
serversConverter.provision(osEnv, identity);
|
||||
previewUrlExposer.expose(osEnv);
|
||||
envVarsConverter.provision(osEnv, identity);
|
||||
if (pvcEnabled) {
|
||||
volumesStrategy.provision(osEnv, identity);
|
||||
}
|
||||
|
||||
// 3 stage - add OpenShift env items
|
||||
// 2nd stage - add OpenShift env items
|
||||
restartPolicyRewriter.provision(osEnv, identity);
|
||||
routeTlsProvisioner.provision(osEnv, identity);
|
||||
resourceLimitRequestProvisioner.provision(osEnv, identity);
|
||||
|
|
|
|||
|
|
@ -56,7 +56,6 @@ import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.configurato
|
|||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.configurator.SshKeysConfigurator;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.configurator.UserPreferencesConfigurator;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.configurator.UserProfileConfigurator;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.WorkspacePVCCleaner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.GatewayTlsProvisioner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.KubernetesCheApiExternalEnvVarProvider;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.KubernetesCheApiInternalEnvVarProvider;
|
||||
|
|
@ -137,7 +136,6 @@ public class OpenShiftInfraModule extends AbstractModule {
|
|||
install(new FactoryModuleBuilder().build(OpenShiftRuntimeFactory.class));
|
||||
install(new FactoryModuleBuilder().build(StartSynchronizerFactory.class));
|
||||
|
||||
bind(WorkspacePVCCleaner.class).asEagerSingleton();
|
||||
bind(RemoveProjectOnWorkspaceRemove.class).asEagerSingleton();
|
||||
|
||||
bind(TrustedCAProvisioner.class).to(OpenshiftTrustedCAProvisioner.class);
|
||||
|
|
|
|||
|
|
@ -16,7 +16,6 @@ import static org.mockito.Mockito.inOrder;
|
|||
import static org.mockito.Mockito.when;
|
||||
|
||||
import org.eclipse.che.api.core.model.workspace.runtime.RuntimeIdentity;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.namespace.pvc.WorkspaceVolumesStrategy;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.CertificateProvisioner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.DeploymentMetadataProvisioner;
|
||||
import org.eclipse.che.workspace.infrastructure.kubernetes.provision.GatewayRouterProvisioner;
|
||||
|
|
@ -55,7 +54,6 @@ import org.testng.annotations.Test;
|
|||
@Listeners(MockitoTestNGListener.class)
|
||||
public class OpenShiftEnvironmentProvisionerTest {
|
||||
|
||||
@Mock private WorkspaceVolumesStrategy volumesStrategy;
|
||||
@Mock private OpenShiftUniqueNamesProvisioner uniqueNamesProvisioner;
|
||||
@Mock private OpenShiftEnvironment osEnv;
|
||||
@Mock private RuntimeIdentity runtimeIdentity;
|
||||
|
|
@ -95,7 +93,6 @@ public class OpenShiftEnvironmentProvisionerTest {
|
|||
serversProvisioner,
|
||||
envVarsProvisioner,
|
||||
restartPolicyRewriter,
|
||||
volumesStrategy,
|
||||
ramLimitProvisioner,
|
||||
logsVolumeMachineProvisioner,
|
||||
podTerminationGracePeriodProvisioner,
|
||||
|
|
@ -117,7 +114,6 @@ public class OpenShiftEnvironmentProvisionerTest {
|
|||
logsVolumeMachineProvisioner,
|
||||
serversProvisioner,
|
||||
envVarsProvisioner,
|
||||
volumesStrategy,
|
||||
uniqueNamesProvisioner,
|
||||
tlsRouteProvisioner,
|
||||
restartPolicyRewriter,
|
||||
|
|
|
|||
Loading…
Reference in New Issue