feat: Import Devworkspace Che Operator (#925)

* Import of DWCO into CO.

Co-authored-by: Anatolii Bazko <abazko@redhat.com>
Co-authored-by: Michal Vala <mvala@redhat.com>
pull/1007/head
Lukas Krejci 2021-08-11 14:07:44 +02:00 committed by GitHub
parent 7ff7399be9
commit 456743ce01
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
114 changed files with 14414 additions and 1065 deletions

View File

@ -39,10 +39,6 @@ runTests() {
sleep 10s
createWorkspaceDevWorkspaceController
waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}
sleep 10s
createWorkspaceDevWorkspaceCheOperator
waitAllPodsRunning ${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE}
}
initDefaults

View File

@ -48,7 +48,6 @@ function bumpPodsInfo() {
function Catch_Finish() {
# grab devworkspace-controller namespace events after running e2e
bumpPodsInfo "devworkspace-controller"
bumpPodsInfo "devworkspace-che"
bumpPodsInfo "admin-che"
oc get devworkspaces -n "admin-che" -o=yaml > $ARTIFACTS_DIR/devworkspaces.yaml

View File

@ -48,14 +48,9 @@ runTests() {
enableDevWorkspaceEngine
waitDevWorkspaceControllerStarted
sleep 10s
createWorkspaceDevWorkspaceController
waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}
sleep 10s
createWorkspaceDevWorkspaceCheOperator
waitAllPodsRunning ${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE}
sleep 10s
createWorkspaceDevWorkspaceController
waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}
}
initDefaults

View File

@ -51,10 +51,6 @@ runTests() {
sleep 10s
createWorkspaceDevWorkspaceController
waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}
sleep 10s
createWorkspaceDevWorkspaceCheOperator
waitAllPodsRunning ${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE}
}
initDefaults

View File

@ -487,7 +487,6 @@ waitDevWorkspaceControllerStarted() {
OPERATOR_POD=$(oc get pods -o json -n ${NAMESPACE} | jq -r '.items[] | select(.metadata.name | test("che-operator-")).metadata.name')
oc logs ${OPERATOR_POD} -c che-operator -n ${NAMESPACE}
oc logs ${OPERATOR_POD} -c devworkspace-che-operator -n ${NAMESPACE}
exit 1
}
@ -500,7 +499,7 @@ createWorkspaceDevWorkspaceController () {
CURRENT_TIME=$(date +%s)
ENDTIME=$(($CURRENT_TIME + 180))
while [ $(date +%s) -lt $ENDTIME ]; do
if oc apply -f https://raw.githubusercontent.com/che-incubator/devworkspace-che-operator/main/samples/flattened_theia-nodejs.yaml -n ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}; then
if oc apply -f ${OPERATOR_REPO}/config/samples/devworkspace_flattened_theia-nodejs.yaml -n ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}; then
break
fi
sleep 10
@ -528,12 +527,6 @@ waitAllPodsRunning() {
exit 1
}
createWorkspaceDevWorkspaceCheOperator() {
oc create namespace ${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE}
sleep 10s
oc apply -f https://raw.githubusercontent.com/che-incubator/devworkspace-che-operator/main/samples/flattened_theia-nodejs.yaml -n ${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE}
}
enableDevWorkspaceEngine() {
kubectl patch checluster/eclipse-che -n ${NAMESPACE} --type=merge -p "{\"spec\":{\"server\":{\"customCheProperties\": {\"CHE_INFRA_KUBERNETES_ENABLE__UNSUPPORTED__K8S\": \"true\"}}}}"
kubectl patch checluster/eclipse-che -n ${NAMESPACE} --type=merge -p '{"spec":{"devWorkspace":{"enable": true}}}'

View File

@ -39,6 +39,10 @@ runTest() {
# Dev Workspace controller tests
enableDevWorkspaceEngine
waitDevWorkspaceControllerStarted
sleep 10s
createWorkspaceDevWorkspaceController
waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}
}
initDefaults

View File

@ -48,6 +48,10 @@ runTest() {
# Dev Workspace controller tests
enableDevWorkspaceEngine
waitDevWorkspaceControllerStarted
sleep 10s
createWorkspaceDevWorkspaceController
waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}
}
initDefaults

View File

@ -47,6 +47,10 @@ runTest() {
# Dev Workspace controller tests
enableDevWorkspaceEngine
waitDevWorkspaceControllerStarted
sleep 10s
createWorkspaceDevWorkspaceController
waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}
}
initDefaults

View File

@ -73,8 +73,6 @@ jobs:
CHE_VERSION=${{ github.event.inputs.version }}
DWO_VERSION=${{ github.event.inputs.dwoVersion }}
if [[ ${DWO_VERSION} != "v"* ]]; then DWO_VERSION="v${DWO_VERSION}"; fi
DWO_CHE_VERSION=${{ github.event.inputs.dwoCheVersion }}
if [[ ${DWO_CHE_VERSION} != "v"* ]]; then DWO_CHE_VERSION="v${DWO_CHE_VERSION}"; fi
echo "CHE_VERSION=${CHE_VERSION}"
BRANCH=${CHE_VERSION%.*}.x
echo "BRANCH=${BRANCH}"
@ -93,10 +91,10 @@ jobs:
export QUAY_ECLIPSE_CHE_PASSWORD=${{ secrets.QUAY_PASSWORD }}
if [[ ${CHE_VERSION} == *".0" ]]; then
./make-release.sh ${CHE_VERSION} --release --check-resources --release-olm-files --dev-workspace-controller-version ${DWO_VERSION} --dev-workspace-che-operator-version ${DWO_CHE_VERSION}
./make-release.sh ${CHE_VERSION} --release --check-resources --release-olm-files --dev-workspace-controller-version ${DWO_VERSION}
else
git checkout ${BRANCH}
./make-release.sh ${CHE_VERSION} --release --release-olm-files --dev-workspace-controller-version ${DWO_VERSION} --dev-workspace-che-operator-version ${DWO_CHE_VERSION}
./make-release.sh ${CHE_VERSION} --release --release-olm-files --dev-workspace-controller-version ${DWO_VERSION}
fi
# default robot account on quay does not have permissions for application repos

View File

@ -14,7 +14,6 @@ FROM registry.access.redhat.com/ubi8/go-toolset:1.15.13-4 as builder
ENV GOPATH=/go/
ENV RESTIC_TAG=v0.12.0
ARG DEV_WORKSPACE_CONTROLLER_VERSION="main"
ARG DEV_WORKSPACE_CHE_OPERATOR_VERSION="main"
ARG DEV_HEADER_REWRITE_TRAEFIK_PLUGIN="main"
USER root
@ -52,10 +51,6 @@ RUN unzip /tmp/asset-devworkspace-operator.zip */deploy/deployment/* -d /tmp &&
mkdir -p /tmp/devworkspace-operator/templates/ && \
mv /tmp/devfile-devworkspace-operator-*/deploy /tmp/devworkspace-operator/templates/
RUN unzip /tmp/asset-devworkspace-che-operator.zip */deploy/deployment/* -d /tmp && \
mkdir -p /tmp/devworkspace-che-operator/templates/ && \
mv /tmp/che-incubator-devworkspace-che-operator-*/deploy /tmp/devworkspace-che-operator/templates/
RUN unzip /tmp/asset-header-rewrite-traefik-plugin.zip -d /tmp && \
mkdir -p /tmp/header-rewrite-traefik-plugin && \
mv /tmp/*-header-rewrite-traefik-plugin-*/headerRewrite.go /tmp/*-header-rewrite-traefik-plugin-*/.traefik.yml /tmp/header-rewrite-traefik-plugin
@ -71,7 +66,6 @@ FROM registry.access.redhat.com/ubi8-minimal:8.4-205.1626828526
COPY --from=builder /che-operator/che-operator /manager
COPY --from=builder /che-operator/templates/*.sh /tmp/
COPY --from=builder /tmp/devworkspace-operator/templates/deploy /tmp/devworkspace-operator/templates
COPY --from=builder /tmp/devworkspace-che-operator/templates/deploy /tmp/devworkspace-che-operator/templates
COPY --from=builder /tmp/header-rewrite-traefik-plugin /tmp/header-rewrite-traefik-plugin
COPY --from=builder /tmp/restic/restic /usr/local/bin/restic
COPY --from=builder /go/restic/LICENSE /usr/local/bin/restic-LICENSE.txt

View File

@ -331,19 +331,6 @@ prepare-templates:
cp -rf /tmp/devfile-devworkspace-operator*/deploy/* /tmp/devworkspace-operator/templates
echo "[INFO] Downloading Dev Workspace operator templates completed."
# Download Dev Workspace Che operator templates
echo "[INFO] Downloading Dev Workspace Che operator templates ..."
rm -f /tmp/devworkspace-che-operator.zip
rm -rf /tmp/che-incubator-devworkspace-che-operator-*
rm -rf /tmp/devworkspace-che-operator/
mkdir -p /tmp/devworkspace-che-operator/templates
curl -sL https://api.github.com/repos/che-incubator/devworkspace-che-operator/zipball/${DEV_WORKSPACE_CHE_OPERATOR_VERSION} > /tmp/devworkspace-che-operator.zip
unzip -q /tmp/devworkspace-che-operator.zip '*/deploy/deployment/*' -d /tmp
cp -r /tmp/che-incubator-devworkspace-che-operator*/deploy/* /tmp/devworkspace-che-operator/templates
echo "[INFO] Downloading Dev Workspace operator templates completed."
create-namespace:
set +e
kubectl create namespace ${ECLIPSE_CHE_NAMESPACE} || true
@ -432,7 +419,7 @@ rm -rf $$TMP_DIR ;\
endef
update-roles:
echo "[INFO] Updating roles with DW and DWCO roles"
echo "[INFO] Updating roles with DW roles"
CLUSTER_ROLES=(
https://raw.githubusercontent.com/devfile/devworkspace-operator/main/deploy/deployment/openshift/objects/devworkspace-controller-view-workspaces.ClusterRole.yaml
@ -441,11 +428,9 @@ update-roles:
https://raw.githubusercontent.com/devfile/devworkspace-operator/main/deploy/deployment/openshift/objects/devworkspace-controller-proxy-role.ClusterRole.yaml
https://raw.githubusercontent.com/devfile/devworkspace-operator/main/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml
https://raw.githubusercontent.com/devfile/devworkspace-operator/main/deploy/deployment/openshift/objects/devworkspace-controller-view-workspaces.ClusterRole.yaml
https://raw.githubusercontent.com/che-incubator/devworkspace-che-operator/main/deploy/deployment/openshift/objects/devworkspace-che-role.ClusterRole.yaml
https://raw.githubusercontent.com/che-incubator/devworkspace-che-operator/main/deploy/deployment/openshift/objects/devworkspace-che-metrics-reader.ClusterRole.yaml
)
# Updates cluster_role.yaml based on DW and DWCO roles
# Updates cluster_role.yaml based on DW roles
## Removes old cluster roles
cat config/rbac/cluster_role.yaml | sed '/CHE-OPERATOR ROLES ONLY: END/q0' > config/rbac/cluster_role.yaml.tmp
mv config/rbac/cluster_role.yaml.tmp config/rbac/cluster_role.yaml
@ -461,7 +446,7 @@ update-roles:
done
ROLES=(
https://raw.githubusercontent.com/che-incubator/devworkspace-che-operator/main/deploy/deployment/openshift/objects/devworkspace-che-leader-election-role.Role.yaml
# currently, there are no other roles we need to incorporate
)
# Updates role.yaml
@ -669,8 +654,6 @@ bundle: generate manifests kustomize ## Generate bundle manifests and metadata,
if [ "$${platform}" = "openshift" ]; then
yq -riSY '(.spec.install.spec.deployments[0].spec.template.spec.containers[0].securityContext."allowPrivilegeEscalation") = false' "$${NEW_CSV}"
yq -riSY '(.spec.install.spec.deployments[0].spec.template.spec.containers[0].securityContext."runAsNonRoot") = true' "$${NEW_CSV}"
yq -riSY '(.spec.install.spec.deployments[0].spec.template.spec.containers[1].securityContext."allowPrivilegeEscalation") = false' "$${NEW_CSV}"
yq -riSY '(.spec.install.spec.deployments[0].spec.template.spec.containers[1].securityContext."runAsNonRoot") = true' "$${NEW_CSV}"
fi
# Format code.
@ -809,39 +792,6 @@ update-deployment-yaml-images:
yq -riY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_che_server_secure_exposer_jwt_proxy_image\") | .value ) = \"$(JWT_PROXY_IMAGE)\"" $(OPERATOR_YAML)
$(MAKE) ensure-license-header FILE="config/manager/manager.yaml"
update-devworkspace-container:
echo "[INFO] Update devworkspace container in the che-operator deployment"
# Deletes old DWCO container
yq -riY "del(.spec.template.spec.containers[1])" $(OPERATOR_YAML)
yq -riY ".spec.template.spec.containers[1].name = \"devworkspace-container\"" $(OPERATOR_YAML)
# Extract DWCO container spec from deployment
DWCO_CONTAINER=$$(curl -sL https://raw.githubusercontent.com/che-incubator/devworkspace-che-operator/main/deploy/deployment/openshift/objects/devworkspace-che-manager.Deployment.yaml \
| sed '1,/containers:/d' \
| sed -n '/serviceAccountName:/q;p' \
| sed -e 's/^/ /')
echo "$${DWCO_CONTAINER}" > dwcontainer
# Add DWCO container to manager.yaml
sed -i -e '/- name: devworkspace-container/{r dwcontainer' -e 'd}' $(OPERATOR_YAML)
rm dwcontainer
# update securityContext
yq -riY ".spec.template.spec.containers[1].securityContext.privileged = false" $(OPERATOR_YAML)
yq -riY ".spec.template.spec.containers[1].securityContext.readOnlyRootFilesystem = false" $(OPERATOR_YAML)
yq -riY ".spec.template.spec.containers[1].securityContext.capabilities.drop[0] = \"ALL\"" $(OPERATOR_YAML)
# update env variable
yq -riY "del( .spec.template.spec.containers[1].env[] | select(.name == \"CONTROLLER_SERVICE_ACCOUNT_NAME\") | .valueFrom)" $(OPERATOR_YAML)
yq -riY "( .spec.template.spec.containers[1].env[] | select(.name == \"CONTROLLER_SERVICE_ACCOUNT_NAME\") | .value) = \"che-operator\"" $(OPERATOR_YAML)
yq -riY "del( .spec.template.spec.containers[1].env[] | select(.name == \"WATCH_NAMESPACE\") | .value)" $(OPERATOR_YAML)
yq -riY "( .spec.template.spec.containers[1].env[] | select(.name == \"WATCH_NAMESPACE\") | .valueFrom.fieldRef.fieldPath) = \"metadata.namespace\"" $(OPERATOR_YAML)
yq -riY ".spec.template.spec.containers[1].args[1] = \"--metrics-addr\"" $(OPERATOR_YAML)
yq -riY ".spec.template.spec.containers[1].args[2] = \"0\"" $(OPERATOR_YAML)
# $(MAKE) ensureLicense $(OPERATOR_YAML)
update-dockerfile-image:
if [ -z $(UBI8_MINIMAL_IMAGE) ]; then
echo "[ERROR] Define `UBI8_MINIMAL_IMAGE` argument"
@ -878,8 +828,6 @@ update-resource-images:
# Update che-operator Dockerfile
$(MAKE) update-dockerfile-image UBI8_MINIMAL_IMAGE="$${UBI8_MINIMAL_IMAGE}"
$(MAKE) update-devworkspace-container
.PHONY: bundle-build
bundle-build: ## Build the bundle image.
if [ -z "$(platform)" ]; then

View File

@ -83,7 +83,7 @@ metadata:
operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
repository: https://github.com/eclipse-che/che-operator
support: Eclipse Foundation
name: eclipse-che-preview-kubernetes.v7.35.0-276.next
name: eclipse-che-preview-kubernetes.v7.35.0-279.next
namespace: placeholder
spec:
apiservicedefinitions: {}
@ -294,8 +294,9 @@ spec:
- oauthclients
verbs:
- create
- get
- delete
- deletecollection
- get
- list
- patch
- update
@ -304,16 +305,6 @@ spec:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
verbs:
- list
- create
- watch
- update
- get
- delete
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
verbs:
- list
@ -326,14 +317,6 @@ spec:
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- get
- create
- update
- delete
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- get
@ -347,6 +330,14 @@ spec:
- checlusters/status
- checlusters/finalizers
- checlusters/status
- checlusterbackups
- checlusterbackups/status
- checlusterbackups/finalizers
- checlusterrestores
- checlusterrestores/status
- backupserverconfigurations
- backupserverconfigurations/status
- chebackupserverconfigurations
verbs:
- '*'
- apiGroups:
@ -358,14 +349,6 @@ spec:
- list
- create
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- get
- create
- watch
- apiGroups:
- ""
resources:
@ -411,40 +394,24 @@ spec:
- delete
- apiGroups:
- apps
- extensions
resources:
- deployments
- replicasets
verbs:
- get
- list
- create
- patch
- watch
- delete
- apiGroups:
- ""
resources:
- services
verbs:
- list
- create
- delete
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
- delete
- list
- '*'
- apiGroups:
- route.openshift.io
resources:
- routes
verbs:
- list
- '*'
- apiGroups:
- route.openshift.io
resources:
- routes/custom-host
verbs:
- create
- delete
- apiGroups:
- ""
resources:
@ -465,11 +432,7 @@ spec:
resources:
- ingresses
verbs:
- list
- create
- watch
- get
- delete
- '*'
- apiGroups:
- networking.k8s.io
resources:
@ -494,6 +457,14 @@ spec:
- subscriptions
verbs:
- get
- apiGroups:
- operators.coreos.com
resources:
- clusterserviceversions
verbs:
- list
- get
- watch
- apiGroups:
- metrics.k8s.io
resources:
@ -513,6 +484,72 @@ spec:
- get
- list
- update
- apiGroups:
- ""
resources:
- configmaps
- persistentvolumeclaims
- pods
- secrets
- serviceaccounts
- services
verbs:
- '*'
- apiGroups:
- apps
resourceNames:
- che-operator
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- delete
- get
- update
- watch
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings
verbs:
- '*'
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/finalizers
verbs:
- update
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/status
verbs:
- get
- patch
- update
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- nonResourceURLs:
- /metrics
verbs:
- get
- apiGroups:
- che.eclipse.org
resources:
- kubernetesimagepullers
verbs:
- '*'
- apiGroups:
- workspace.devfile.io
resources:
@ -776,173 +813,6 @@ spec:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
- persistentvolumeclaims
- pods
- secrets
- serviceaccounts
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- pods/exec
verbs:
- create
- apiGroups:
- ""
resources:
- services
verbs:
- '*'
- apiGroups:
- apps
resourceNames:
- devworkspace-che-operator
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- apps
- extensions
resources:
- deployments
verbs:
- get
- list
- watch
- apiGroups:
- apps
- extensions
resources:
- deployments
- replicasets
verbs:
- '*'
- apiGroups:
- apps
- extensions
resources:
- replicasets
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- delete
- get
- update
- watch
- apiGroups:
- org.eclipse.che
resources:
- checlusters
- checlusters/status
- checlusters/finalizers
verbs:
- '*'
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings
verbs:
- '*'
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/finalizers
verbs:
- update
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- configmap
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- apiGroups:
- oauth.openshift.io
resources:
- oauthclients
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
- clusterroles
- rolebindings
- roles
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- route.openshift.io
resources:
- routes
verbs:
- '*'
- apiGroups:
- route.openshift.io
resources:
- routes/custom-host
verbs:
- create
- nonResourceURLs:
- /metrics
verbs:
- get
serviceAccountName: che-operator
deployments:
- name: che-operator
@ -991,7 +861,7 @@ spec:
- name: RELATED_IMAGE_che_tls_secrets_creation_job
value: quay.io/eclipse/che-tls-secret-creator:alpine-d1ed4ad
- name: RELATED_IMAGE_pvc_jobs
value: registry.access.redhat.com/ubi8-minimal:8.4-205.1626828526
value: registry.access.redhat.com/ubi8-minimal:8.4-208
- name: RELATED_IMAGE_postgres
value: quay.io/eclipse/che--centos--postgresql-96-centos7:9.6-b681d78125361519180a6ac05242c296f8906c11eab7e207b5ca9a89b6344392
- name: RELATED_IMAGE_keycloak
@ -1038,6 +908,8 @@ spec:
value: che-postgres-secret
- name: CHE_SERVER_TRUST_STORE_CONFIGMAP_NAME
value: ca-certs
- name: MAX_CONCURRENT_RECONCILES
value: "1"
image: quay.io/eclipse/che-operator:next
imagePullPolicy: Always
livenessProbe:
@ -1075,47 +947,6 @@ spec:
- ALL
privileged: false
readOnlyRootFilesystem: false
- args:
- --enable-leader-election
- --metrics-addr
- "0"
command:
- /usr/local/bin/devworkspace-che-operator
env:
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.annotations['olm.targetNamespaces']
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: devworkspace-che-operator
- name: MAX_CONCURRENT_RECONCILES
value: "1"
- name: CONTROLLER_SERVICE_ACCOUNT_NAME
value: che-operator
- name: RELATED_IMAGE_gateway
value: quay.io/eclipse/che--traefik:v2.3.2-6e6d4dc5a19afe06778ca092cdbbb98e31cb9f9c313edafa23f81a0e6ddf8a23
- name: RELATED_IMAGE_gateway_configurer
value: quay.io/che-incubator/configbump:0.1.4
image: quay.io/che-incubator/devworkspace-che-operator:ci
imagePullPolicy: Always
name: devworkspace-che-operator
resources:
limits:
cpu: 100m
memory: 256Mi
requests:
cpu: 100m
memory: 32Mi
securityContext:
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: false
hostIPC: false
hostNetwork: false
hostPID: false
@ -1179,8 +1010,10 @@ spec:
- get
- apiGroups:
- apps
- extensions
resources:
- deployments
- replicasets
verbs:
- '*'
- apiGroups:
@ -1215,12 +1048,6 @@ spec:
- get
- list
- watch
- apiGroups:
- che.eclipse.org
resources:
- kubernetesimagepullers
verbs:
- '*'
- apiGroups:
- operators.coreos.com
resources:
@ -1236,18 +1063,6 @@ spec:
verbs:
- get
- list
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
@ -1262,6 +1077,34 @@ spec:
- events
verbs:
- create
- apiGroups:
- apps
resourceNames:
- che-operator
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings
verbs:
- '*'
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/finalizers
verbs:
- update
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
@ -1324,4 +1167,4 @@ spec:
maturity: stable
provider:
name: Eclipse Foundation
version: 7.35.0-276.next
version: 7.35.0-279.next

View File

@ -76,7 +76,7 @@ metadata:
operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
repository: https://github.com/eclipse-che/che-operator
support: Eclipse Foundation
name: eclipse-che-preview-openshift.v7.35.0-276.next
name: eclipse-che-preview-openshift.v7.35.0-279.next
namespace: placeholder
spec:
apiservicedefinitions: {}
@ -287,8 +287,9 @@ spec:
- oauthclients
verbs:
- create
- get
- delete
- deletecollection
- get
- list
- patch
- update
@ -339,16 +340,6 @@ spec:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
verbs:
- list
- create
- watch
- update
- get
- delete
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
verbs:
- list
@ -358,17 +349,9 @@ spec:
- get
- delete
- apiGroups:
- authorization.openshift.io
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- get
- create
- update
- delete
- apiGroups:
- authorization.openshift.io
resources:
- rolebindings
verbs:
- get
@ -376,17 +359,9 @@ spec:
- update
- delete
- apiGroups:
- rbac.authorization.k8s.io
- authorization.openshift.io
resources:
- roles
verbs:
- get
- create
- update
- delete
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- get
@ -400,6 +375,14 @@ spec:
- checlusters/status
- checlusters/finalizers
- checlusters/status
- checlusterbackups
- checlusterbackups/status
- checlusterbackups/finalizers
- checlusterrestores
- checlusterrestores/status
- backupserverconfigurations
- backupserverconfigurations/status
- chebackupserverconfigurations
verbs:
- '*'
- apiGroups:
@ -425,14 +408,6 @@ spec:
- list
- create
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- get
- create
- watch
- apiGroups:
- ""
resources:
@ -478,40 +453,24 @@ spec:
- delete
- apiGroups:
- apps
- extensions
resources:
- deployments
- replicasets
verbs:
- get
- list
- create
- patch
- watch
- delete
- apiGroups:
- ""
resources:
- services
verbs:
- list
- create
- delete
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
- delete
- list
- '*'
- apiGroups:
- route.openshift.io
resources:
- routes
verbs:
- list
- '*'
- apiGroups:
- route.openshift.io
resources:
- routes/custom-host
verbs:
- create
- delete
- apiGroups:
- ""
resources:
@ -532,11 +491,7 @@ spec:
resources:
- ingresses
verbs:
- list
- create
- watch
- get
- delete
- '*'
- apiGroups:
- networking.k8s.io
resources:
@ -561,6 +516,14 @@ spec:
- subscriptions
verbs:
- get
- apiGroups:
- operators.coreos.com
resources:
- clusterserviceversions
verbs:
- list
- get
- watch
- apiGroups:
- metrics.k8s.io
resources:
@ -570,6 +533,72 @@ spec:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
- persistentvolumeclaims
- pods
- secrets
- serviceaccounts
- services
verbs:
- '*'
- apiGroups:
- apps
resourceNames:
- che-operator
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- delete
- get
- update
- watch
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings
verbs:
- '*'
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/finalizers
verbs:
- update
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/status
verbs:
- get
- patch
- update
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- nonResourceURLs:
- /metrics
verbs:
- get
- apiGroups:
- che.eclipse.org
resources:
- kubernetesimagepullers
verbs:
- '*'
- apiGroups:
- workspace.devfile.io
resources:
@ -833,173 +862,6 @@ spec:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
- persistentvolumeclaims
- pods
- secrets
- serviceaccounts
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- pods/exec
verbs:
- create
- apiGroups:
- ""
resources:
- services
verbs:
- '*'
- apiGroups:
- apps
resourceNames:
- devworkspace-che-operator
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- apps
- extensions
resources:
- deployments
verbs:
- get
- list
- watch
- apiGroups:
- apps
- extensions
resources:
- deployments
- replicasets
verbs:
- '*'
- apiGroups:
- apps
- extensions
resources:
- replicasets
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- delete
- get
- update
- watch
- apiGroups:
- org.eclipse.che
resources:
- checlusters
- checlusters/status
- checlusters/finalizers
verbs:
- '*'
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings
verbs:
- '*'
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/finalizers
verbs:
- update
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- configmap
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- apiGroups:
- oauth.openshift.io
resources:
- oauthclients
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
- clusterroles
- rolebindings
- roles
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- route.openshift.io
resources:
- routes
verbs:
- '*'
- apiGroups:
- route.openshift.io
resources:
- routes/custom-host
verbs:
- create
- nonResourceURLs:
- /metrics
verbs:
- get
serviceAccountName: che-operator
deployments:
- name: che-operator
@ -1046,7 +908,7 @@ spec:
- name: RELATED_IMAGE_devfile_registry
value: quay.io/eclipse/che-devfile-registry:next
- name: RELATED_IMAGE_pvc_jobs
value: registry.access.redhat.com/ubi8-minimal:8.4-205.1626828526
value: registry.access.redhat.com/ubi8-minimal:8.4-208
- name: RELATED_IMAGE_postgres
value: quay.io/eclipse/che--centos--postgresql-96-centos7:9.6-b681d78125361519180a6ac05242c296f8906c11eab7e207b5ca9a89b6344392
- name: RELATED_IMAGE_keycloak
@ -1093,6 +955,8 @@ spec:
value: che-postgres-secret
- name: CHE_SERVER_TRUST_STORE_CONFIGMAP_NAME
value: ca-certs
- name: MAX_CONCURRENT_RECONCILES
value: "1"
image: quay.io/eclipse/che-operator:next
imagePullPolicy: Always
livenessProbe:
@ -1132,49 +996,6 @@ spec:
privileged: false
readOnlyRootFilesystem: false
runAsNonRoot: true
- args:
- --enable-leader-election
- --metrics-addr
- "0"
command:
- /usr/local/bin/devworkspace-che-operator
env:
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.annotations['olm.targetNamespaces']
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: devworkspace-che-operator
- name: MAX_CONCURRENT_RECONCILES
value: "1"
- name: CONTROLLER_SERVICE_ACCOUNT_NAME
value: che-operator
- name: RELATED_IMAGE_gateway
value: quay.io/eclipse/che--traefik:v2.3.2-6e6d4dc5a19afe06778ca092cdbbb98e31cb9f9c313edafa23f81a0e6ddf8a23
- name: RELATED_IMAGE_gateway_configurer
value: quay.io/che-incubator/configbump:0.1.4
image: quay.io/che-incubator/devworkspace-che-operator:ci
imagePullPolicy: Always
name: devworkspace-che-operator
resources:
limits:
cpu: 100m
memory: 256Mi
requests:
cpu: 100m
memory: 32Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: false
runAsNonRoot: true
hostIPC: false
hostNetwork: false
hostPID: false
@ -1245,8 +1066,10 @@ spec:
- get
- apiGroups:
- apps
- extensions
resources:
- deployments
- replicasets
verbs:
- '*'
- apiGroups:
@ -1281,12 +1104,6 @@ spec:
- get
- list
- watch
- apiGroups:
- che.eclipse.org
resources:
- kubernetesimagepullers
verbs:
- '*'
- apiGroups:
- operators.coreos.com
resources:
@ -1302,18 +1119,6 @@ spec:
verbs:
- get
- list
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
@ -1328,6 +1133,47 @@ spec:
- events
verbs:
- create
- apiGroups:
- apps
resourceNames:
- che-operator
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings
verbs:
- '*'
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/finalizers
verbs:
- update
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/status
verbs:
- get
- patch
- update
- apiGroups:
- oauth.openshift.io
resources:
- oauthclients
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
@ -1390,4 +1236,4 @@ spec:
maturity: stable
provider:
name: Eclipse Foundation
version: 7.35.0-276.next
version: 7.35.0-279.next

View File

@ -802,14 +802,6 @@ spec:
- services
verbs:
- '*'
- apiGroups:
- apps
resourceNames:
- devworkspace-che-operator
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- apps
- extensions
@ -1006,8 +998,6 @@ spec:
value: quay.io/eclipse/che--traefik@sha256:df90799aaca1ad6fb9e06d311140035d2a0c2295a4f8f508f6b55ee056bb677e
- name: RELATED_IMAGE_single_host_gateway_config_sidecar
value: quay.io/che-incubator/configbump@sha256:175ff2ba1bd74429de192c0a9facf39da5699c6da9f151bd461b3dc8624dd532
- name: RELATED_IMAGE_devworkspace_che_operator
value: quay.io/che-incubator/devworkspace-che-operator@sha256:f943ada4d07ae8375f5a93bcc57f7f66335b14940bfe2c5d9565d155588ef514
- name: RELATED_IMAGE_devworkspace_controller
value: quay.io/devfile/devworkspace-controller@sha256:f17dad6df3f2f0f7b245e05677293bef1d35a17e0349002f9e47816de03c0cdd
- name: RELATED_IMAGE_internal_rest_backup_server

View File

@ -858,14 +858,6 @@ spec:
- services
verbs:
- '*'
- apiGroups:
- apps
resourceNames:
- devworkspace-che-operator
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- apps
- extensions
@ -1060,8 +1052,6 @@ spec:
value: quay.io/eclipse/che--traefik@sha256:df90799aaca1ad6fb9e06d311140035d2a0c2295a4f8f508f6b55ee056bb677e
- name: RELATED_IMAGE_single_host_gateway_config_sidecar
value: quay.io/che-incubator/configbump@sha256:175ff2ba1bd74429de192c0a9facf39da5699c6da9f151bd461b3dc8624dd532
- name: RELATED_IMAGE_devworkspace_che_operator
value: quay.io/che-incubator/devworkspace-che-operator@sha256:f943ada4d07ae8375f5a93bcc57f7f66335b14940bfe2c5d9565d155588ef514
- name: RELATED_IMAGE_devworkspace_controller
value: quay.io/devfile/devworkspace-controller@sha256:f17dad6df3f2f0f7b245e05677293bef1d35a17e0349002f9e47816de03c0cdd
- name: RELATED_IMAGE_internal_rest_backup_server

View File

@ -1,3 +1,13 @@
#
# Copyright (c) 2019-2021 Red Hat, Inc.
# This program and the accompanying materials are made
# available under the terms of the Eclipse Public License 2.0
# which is available at https://www.eclipse.org/legal/epl-2.0/
#
# SPDX-License-Identifier: EPL-2.0
#
# Contributors:
# Red Hat, Inc. - initial API and implementation
apiVersion: apps/v1
kind: Deployment
metadata:
@ -58,7 +68,7 @@ spec:
- name: RELATED_IMAGE_che_tls_secrets_creation_job
value: quay.io/eclipse/che-tls-secret-creator:alpine-d1ed4ad
- name: RELATED_IMAGE_pvc_jobs
value: registry.access.redhat.com/ubi8-minimal:8.4-205.1626828526
value: registry.access.redhat.com/ubi8-minimal:8.4-208
- name: RELATED_IMAGE_postgres
value: quay.io/eclipse/che--centos--postgresql-96-centos7:9.6-b681d78125361519180a6ac05242c296f8906c11eab7e207b5ca9a89b6344392
- name: RELATED_IMAGE_keycloak
@ -105,6 +115,8 @@ spec:
value: che-postgres-secret
- name: CHE_SERVER_TRUST_STORE_CONFIGMAP_NAME
value: ca-certs
- name: MAX_CONCURRENT_RECONCILES
value: "1"
livenessProbe:
httpGet:
path: /healthz
@ -136,47 +148,6 @@ spec:
requests:
cpu: 100m
memory: 64Mi
- args:
- --enable-leader-election
- --metrics-addr
- '0'
command:
- /usr/local/bin/devworkspace-che-operator
env:
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: devworkspace-che-operator
- name: MAX_CONCURRENT_RECONCILES
value: "1"
- name: CONTROLLER_SERVICE_ACCOUNT_NAME
value: che-operator
- name: RELATED_IMAGE_gateway
value: quay.io/eclipse/che--traefik:v2.3.2-6e6d4dc5a19afe06778ca092cdbbb98e31cb9f9c313edafa23f81a0e6ddf8a23
- name: RELATED_IMAGE_gateway_configurer
value: quay.io/che-incubator/configbump:0.1.4
image: quay.io/che-incubator/devworkspace-che-operator:ci
imagePullPolicy: Always
name: devworkspace-che-operator
resources:
limits:
cpu: 100m
memory: 256Mi
requests:
cpu: 100m
memory: 32Mi
securityContext:
privileged: false
readOnlyRootFilesystem: false
capabilities:
drop:
- ALL
hostIPC: false
hostNetwork: false
hostPID: false

View File

@ -31,8 +31,9 @@ rules:
- oauthclients
verbs:
- create
- get
- delete
- deletecollection
- get
- list
- patch
- update
@ -83,16 +84,6 @@ rules:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
verbs:
- list
- create
- watch
- update
- get
- delete
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
verbs:
- list
@ -102,17 +93,9 @@ rules:
- get
- delete
- apiGroups:
- authorization.openshift.io
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- get
- create
- update
- delete
- apiGroups:
- authorization.openshift.io
resources:
- rolebindings
verbs:
- get
@ -120,17 +103,9 @@ rules:
- update
- delete
- apiGroups:
- rbac.authorization.k8s.io
- authorization.openshift.io
resources:
- roles
verbs:
- get
- create
- update
- delete
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- get
@ -144,6 +119,14 @@ rules:
- checlusters/status
- checlusters/finalizers
- checlusters/status
- checlusterbackups
- checlusterbackups/status
- checlusterbackups/finalizers
- checlusterrestores
- checlusterrestores/status
- backupserverconfigurations
- backupserverconfigurations/status
- chebackupserverconfigurations
verbs:
- '*'
- apiGroups:
@ -169,14 +152,6 @@ rules:
- list
- create
- update
- apiGroups:
- ''
resources:
- serviceaccounts
verbs:
- get
- create
- watch
- apiGroups:
- ''
resources:
@ -222,40 +197,24 @@ rules:
- delete
- apiGroups:
- apps
- extensions
resources:
- deployments
- replicasets
verbs:
- get
- list
- create
- patch
- watch
- delete
- apiGroups:
- ''
resources:
- services
verbs:
- list
- create
- delete
- apiGroups:
- ''
resources:
- configmaps
verbs:
- get
- create
- delete
- list
- '*'
- apiGroups:
- route.openshift.io
resources:
- routes
verbs:
- list
- '*'
- apiGroups:
- route.openshift.io
resources:
- routes/custom-host
verbs:
- create
- delete
- apiGroups:
- ''
resources:
@ -276,11 +235,7 @@ rules:
resources:
- ingresses
verbs:
- list
- create
- watch
- get
- delete
- '*'
- apiGroups:
- networking.k8s.io
resources:
@ -305,6 +260,14 @@ rules:
- subscriptions
verbs:
- get
- apiGroups:
- operators.coreos.com
resources:
- clusterserviceversions
verbs:
- list
- get
- watch
- apiGroups:
- metrics.k8s.io
resources:
@ -324,6 +287,72 @@ rules:
- get
- list
- update
- apiGroups:
- ''
resources:
- configmaps
- persistentvolumeclaims
- pods
- secrets
- serviceaccounts
- services
verbs:
- '*'
- apiGroups:
- apps
resourceNames:
- che-operator
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- delete
- get
- update
- watch
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings
verbs:
- '*'
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/finalizers
verbs:
- update
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/status
verbs:
- get
- patch
- update
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- nonResourceURLs:
- /metrics
verbs:
- get
- apiGroups:
- che.eclipse.org
resources:
- kubernetesimagepullers
verbs:
- '*'
### CHE-OPERATOR ROLES ONLY: END
# devworkspace-controller-view-workspaces.ClusterRole.yaml
- apiGroups:
@ -594,172 +623,3 @@ rules:
- get
- list
- watch
# devworkspace-che-role.ClusterRole.yaml
- apiGroups:
- ""
resources:
- configmaps
- persistentvolumeclaims
- pods
- secrets
- serviceaccounts
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- pods/exec
verbs:
- create
- apiGroups:
- ""
resources:
- services
verbs:
- '*'
- apiGroups:
- apps
resourceNames:
- devworkspace-che-operator
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- apps
- extensions
resources:
- deployments
verbs:
- get
- list
- watch
- apiGroups:
- apps
- extensions
resources:
- deployments
- replicasets
verbs:
- '*'
- apiGroups:
- apps
- extensions
resources:
- replicasets
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- delete
- get
- update
- watch
- apiGroups:
- org.eclipse.che
resources:
- checlusters
- checlusters/status
- checlusters/finalizers
verbs:
- '*'
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings
verbs:
- '*'
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/finalizers
verbs:
- update
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- configmap
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- apiGroups:
- oauth.openshift.io
resources:
- oauthclients
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
- clusterroles
- rolebindings
- roles
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- route.openshift.io
resources:
- routes
verbs:
- '*'
- apiGroups:
- route.openshift.io
resources:
- routes/custom-host
verbs:
- create
# devworkspace-che-metrics-reader.ClusterRole.yaml
- nonResourceURLs:
- /metrics
verbs:
- get

View File

@ -82,8 +82,10 @@ rules:
- get
- apiGroups:
- apps
- extensions
resources:
- deployments
- replicasets
verbs:
- '*'
- apiGroups:
@ -118,12 +120,6 @@ rules:
- get
- list
- watch
- apiGroups:
- che.eclipse.org
resources:
- kubernetesimagepullers
verbs:
- '*'
- apiGroups:
- operators.coreos.com
resources:
@ -139,20 +135,6 @@ rules:
verbs:
- get
- list
### CHE-OPERATOR ROLES ONLY: END
# devworkspace-che-leader-election-role.Role.yaml
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
@ -167,3 +149,46 @@ rules:
- events
verbs:
- create
- apiGroups:
- apps
resourceNames:
- che-operator
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings
verbs:
- '*'
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/finalizers
verbs:
- update
- apiGroups:
- controller.devfile.io
resources:
- devworkspaceroutings/status
verbs:
- get
- patch
- update
- apiGroups:
- oauth.openshift.io
resources:
- oauthclients
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
### CHE-OPERATOR ROLES ONLY: END

View File

@ -0,0 +1,100 @@
# Sample DevWorkspace running the "next" (unreleased) Che-Theia IDE against a
# cloned nodejs sample project, routed through the Che gateway (routingClass: che).
kind: DevWorkspace
apiVersion: workspace.devfile.io/v1alpha2
metadata:
  name: theia-next
spec:
  started: true
  routingClass: che
  template:
    projects:
      - name: web-nodejs-sample
        git:
          remotes:
            origin: "https://github.com/che-samples/web-nodejs-sample.git"
    components:
      ### BEGIN Contributions from Theia plugin ###
      # Shared volume where Theia plugins are stored (mounted by theia-ide below).
      - name: plugins
        volume: {}
      - name: theia-ide
        attributes:
          "app.kubernetes.io/name": che-theia.eclipse.org
          "app.kubernetes.io/part-of": che.eclipse.org
          "app.kubernetes.io/component": editor
        container:
          image: "quay.io/eclipse/che-theia:next"
          env:
            - name: THEIA_PLUGINS
              value: local-dir:///plugins
            - name: HOSTED_PLUGIN_HOSTNAME
              value: 0.0.0.0
            - name: HOSTED_PLUGIN_PORT
              value: "3130"
            - name: THEIA_HOST
              value: 0.0.0.0
          volumeMounts:
            - path: "/plugins"
              name: plugins
          mountSources: true
          memoryLimit: "512M"
          endpoints:
            - name: "theia"
              exposure: public
              targetPort: 3100
              secure: true
              protocol: http
              attributes:
                type: ide
            - name: "webviews"
              exposure: public
              targetPort: 3100
              protocol: http
              secure: true
              attributes:
                type: webview
                unique: "true"
            - name: "theia-dev"
              exposure: public
              targetPort: 3130
              protocol: http
              attributes:
                type: ide-dev
            - name: "theia-redir-1"
              exposure: public
              targetPort: 13131
              protocol: http
            - name: "theia-redir-2"
              exposure: public
              targetPort: 13132
              protocol: http
            - name: "theia-redir-3"
              exposure: public
              targetPort: 13133
              protocol: http
      - name: che-theia-terminal
        attributes:
          "app.kubernetes.io/name": che-theia.eclipse.org
          "app.kubernetes.io/part-of": che.eclipse.org
          "app.kubernetes.io/component": che-theia-terminal
        container:
          image: "quay.io/eclipse/che-machine-exec:nightly"
          command: ['/go/bin/che-machine-exec']
          args:
            - '--url'
            - '0.0.0.0:3333'
            - '--pod-selector'
            - controller.devfile.io/devworkspace_id=$(DEVWORKSPACE_ID)
          endpoints:
            - name: "che-theia-terminal"
              exposure: public
              targetPort: 3333
              protocol: ws
              secure: true
              attributes:
                type: collocated-terminal
      ### END Contributions from che-theia plugin ###
    commands:
      - id: say-hello
        exec:
          # NOTE(review): no component named "plugin" is declared above; the declared
          # components are "plugins" (a volume), "theia-ide" and "che-theia-terminal".
          # Confirm which container this command was meant to run in.
          component: plugin
          commandLine: echo "Hello from $(pwd)"
          workingDir: ${PROJECTS_ROOT}/project/app

View File

@ -6,4 +6,6 @@ resources:
# - org_v1_chebackupserverconfiguration.yaml
# - org_v1_checlusterbackup.yaml
# - org_v1_checlusterrestore.yaml
# Uncomment to enable a devworkspace sample
# - devworkspace_flattened_theia-nodejs.yaml
#+kubebuilder:scaffold:manifestskustomizesamples

View File

@ -94,10 +94,12 @@ type CheClusterReconciler struct {
tests bool
userHandler OpenShiftOAuthUserHandler
permissionChecker PermissionChecker
// the namespace to which to limit the reconciliation. If empty, all namespaces are considered
namespace string
}
// NewReconciler returns a new CheClusterReconciler
func NewReconciler(mgr ctrl.Manager) (*CheClusterReconciler, error) {
func NewReconciler(mgr ctrl.Manager, namespace string) (*CheClusterReconciler, error) {
noncachedClient, err := client.New(mgr.GetConfig(), client.Options{Scheme: mgr.GetScheme()})
if err != nil {
return nil, err
@ -115,6 +117,7 @@ func NewReconciler(mgr ctrl.Manager) (*CheClusterReconciler, error) {
discoveryClient: discoveryClient,
userHandler: NewOpenShiftOAuthUserHandler(noncachedClient),
permissionChecker: &K8sApiPermissionChecker{},
namespace: namespace,
}, nil
}
@ -214,6 +217,10 @@ func (r *CheClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
})
}
if r.namespace != "" {
contollerBuilder.WithEventFilter(util.InNamespaceEventFilter(r.namespace))
}
return contollerBuilder.
For(&orgv1.CheCluster{}).
Complete(r)

View File

@ -18,6 +18,7 @@ import (
"time"
chev1 "github.com/eclipse-che/che-operator/api/v1"
"github.com/eclipse-che/che-operator/pkg/util"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
@ -44,11 +45,13 @@ type ReconcileCheClusterBackup struct {
// that reads objects from the cache and writes to the apiserver
client client.Client
scheme *runtime.Scheme
// the namespace to which to limit the reconciliation. If empty, all namespaces are considered
namespace string
}
// NewReconciler returns a new reconcile.Reconciler
func NewReconciler(mgr manager.Manager) *ReconcileCheClusterBackup {
return &ReconcileCheClusterBackup{client: mgr.GetClient(), scheme: mgr.GetScheme()}
func NewReconciler(mgr manager.Manager, namespace string) *ReconcileCheClusterBackup {
return &ReconcileCheClusterBackup{client: mgr.GetClient(), scheme: mgr.GetScheme(), namespace: namespace}
}
// SetupWithManager sets up the controller with the Manager.
@ -69,9 +72,15 @@ func (r *ReconcileCheClusterBackup) SetupWithManager(mgr ctrl.Manager) error {
},
}
return ctrl.NewControllerManagedBy(mgr).
bldr := ctrl.NewControllerManagedBy(mgr).
Named("checlusterbackup-controller").
Watches(&source.Kind{Type: &chev1.CheClusterBackup{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(backupCRPredicate)).
Watches(&source.Kind{Type: &chev1.CheClusterBackup{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(backupCRPredicate))
if r.namespace != "" {
bldr.WithEventFilter(util.InNamespaceEventFilter(r.namespace))
}
return bldr.
For(&chev1.CheClusterBackup{}).
Complete(r)
}

View File

@ -18,6 +18,7 @@ import (
"time"
chev1 "github.com/eclipse-che/che-operator/api/v1"
"github.com/eclipse-che/che-operator/pkg/util"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
@ -42,11 +43,13 @@ type ReconcileCheClusterRestore struct {
// that reads objects from the cache and writes to the apiserver
client client.Client
scheme *runtime.Scheme
// the namespace to which to limit the reconciliation. If empty, all namespaces are considered
namespace string
}
// NewReconciler returns a new reconcile.Reconciler
func NewReconciler(mgr manager.Manager) *ReconcileCheClusterRestore {
return &ReconcileCheClusterRestore{client: mgr.GetClient(), scheme: mgr.GetScheme()}
func NewReconciler(mgr manager.Manager, namespace string) *ReconcileCheClusterRestore {
return &ReconcileCheClusterRestore{client: mgr.GetClient(), scheme: mgr.GetScheme(), namespace: namespace}
}
func (r *ReconcileCheClusterRestore) SetupWithManager(mgr ctrl.Manager) error {
@ -66,9 +69,15 @@ func (r *ReconcileCheClusterRestore) SetupWithManager(mgr ctrl.Manager) error {
},
}
return ctrl.NewControllerManagedBy(mgr).
bldr := ctrl.NewControllerManagedBy(mgr).
Named("checlusterrestore-controller").
Watches(&source.Kind{Type: &chev1.CheClusterRestore{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(restoreCRPredicate)).
Watches(&source.Kind{Type: &chev1.CheClusterRestore{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(restoreCRPredicate))
if r.namespace != "" {
bldr.WithEventFilter(util.InNamespaceEventFilter(r.namespace))
}
return bldr.
For(&chev1.CheClusterRestore{}).
Complete(r)
}

View File

@ -0,0 +1,12 @@
= Devworkspace Che controller
This is an import of the originally standalone DevWorkspace Che operator.
As such, many things that could be shared with or reused from the rest
of the che-operator codebase currently are not.
This situation will hopefully improve over time as we integrate the two
codebases more and more.
In particular, the `controller/devworkspace/sync` subpackage is more
or less identical to `deploy/sync` and should be replaced by `deploy/sync`
after a careful inspection, if possible.

View File

@ -0,0 +1,459 @@
//
// Copyright (c) 2019-2020 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package devworkspace
import (
"context"
"encoding/hex"
stdErrors "errors"
"fmt"
"math/rand"
"reflect"
"strings"
"sync"
"time"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
checluster "github.com/eclipse-che/che-operator/api"
checlusterv1 "github.com/eclipse-che/che-operator/api/v1"
"github.com/eclipse-che/che-operator/api/v2alpha1"
"github.com/eclipse-che/che-operator/controllers/devworkspace/defaults"
datasync "github.com/eclipse-che/che-operator/controllers/devworkspace/sync"
"github.com/eclipse-che/che-operator/pkg/deploy"
"github.com/eclipse-che/che-operator/pkg/util"
routev1 "github.com/openshift/api/route/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
rbac "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
log = ctrl.Log.WithName("che")
currentCheInstances = map[client.ObjectKey]v2alpha1.CheCluster{}
cheInstancesAccess = sync.Mutex{}
)
const (
// FinalizerName is the name of the finalizer put on the Che Cluster resources by the controller. Public for testing purposes.
FinalizerName = "checluster.che.eclipse.org"
)
type CheClusterReconciler struct {
client client.Client
scheme *runtime.Scheme
syncer datasync.Syncer
}
// GetCurrentCheClusterInstances returns a snapshot of all che clusters (keyed by their
// namespaced name) the che cluster controller currently knows of. The data is only
// meaningful after reconciliation has taken place.
//
// If this method is called from another controller, it effectively couples that controller
// with the che manager controller. Such a controller therefore has to run in the same
// process as the che manager controller. On the other hand, using this method (and
// tolerating its eventual consistency) makes the calling controller more efficient,
// because it doesn't have to look the che managers up in the cluster itself.
//
// If need be, this method can be replaced by simply calling client.List to get all the
// che managers in the cluster.
func GetCurrentCheClusterInstances() map[client.ObjectKey]v2alpha1.CheCluster {
	cheInstancesAccess.Lock()
	defer cheInstancesAccess.Unlock()

	// Hand out a copy so callers can't observe (or corrupt) the shared map.
	snapshot := make(map[client.ObjectKey]v2alpha1.CheCluster, len(currentCheInstances))
	for key, cluster := range currentCheInstances {
		snapshot[key] = cluster
	}

	return snapshot
}
// New returns a new instance of the Che manager reconciler. This is mainly useful for
// testing because it doesn't set up any watches in the cluster, etc. For production
// use SetupWithManager instead.
func New(cl client.Client, scheme *runtime.Scheme) CheClusterReconciler {
	reconciler := CheClusterReconciler{
		client: cl,
		scheme: scheme,
	}
	reconciler.syncer = datasync.New(cl, scheme)
	return reconciler
}
// SetupWithManager registers this reconciler with the manager, watching CheCluster
// resources together with every kind of object the reconciler creates on their behalf.
func (r *CheClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
	r.client = mgr.GetClient()
	r.scheme = mgr.GetScheme()
	r.syncer = datasync.New(r.client, r.scheme)

	controllerBuilder := ctrl.NewControllerManagedBy(mgr).
		For(&checlusterv1.CheCluster{}).
		Owns(&corev1.Service{}).
		Owns(&v1beta1.Ingress{}).
		Owns(&corev1.ConfigMap{}).
		Owns(&appsv1.Deployment{}).
		Owns(&corev1.Pod{}).
		Owns(&corev1.ServiceAccount{}).
		Owns(&rbac.Role{}).
		Owns(&rbac.RoleBinding{})

	// Routes only exist on OpenShift, so only watch them there.
	if infrastructure.IsOpenShift() {
		controllerBuilder.Owns(&routev1.Route{})
	}

	return controllerBuilder.Complete(r)
}
// Reconcile brings the devworkspace-related state of a CheCluster in sync with its spec.
// It holds the shared-map lock for the whole reconciliation so that readers of
// GetCurrentCheClusterInstances never observe a half-reconciled cluster.
func (r *CheClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	ctx := context.Background()

	cheInstancesAccess.Lock()
	defer cheInstancesAccess.Unlock()

	// remove the manager from the shared map for the time of the reconciliation
	// we'll add it back if it is successfully reconciled.
	// The access to the map is locked for the time of reconciliation so that outside
	// callers don't witness this intermediate state.
	delete(currentCheInstances, req.NamespacedName)

	// make sure we've checked we're in a valid state
	currentV1 := &checlusterv1.CheCluster{}
	err := r.client.Get(ctx, req.NamespacedName, currentV1)
	if err != nil {
		if errors.IsNotFound(err) {
			// Ok, our current router disappeared...
			return ctrl.Result{}, nil
		}
		// other error - let's requeue
		return ctrl.Result{}, err
	}

	// Work on the v2alpha1 view of the cluster; currentV1 is kept for status updates.
	current := checluster.AsV2alpha1(currentV1)

	if current.GetDeletionTimestamp() != nil {
		return ctrl.Result{}, r.finalize(ctx, current, currentV1)
	}

	// The controller is a no-op unless the devworkspace CRDs exist AND the feature
	// is enabled in the spec; record the reason in the status and bail out early.
	var disabledMessage string
	if !r.scheme.IsGroupRegistered("controller.devfile.io") {
		disabledMessage = "Devworkspace CRDs are not installed"
	}
	if disabledMessage == "" && !current.Spec.IsEnabled() {
		disabledMessage = "Devworkspace Che is disabled"
	}

	if disabledMessage != "" {
		res, err := r.updateStatus(ctx, current, currentV1, nil, current.Status.GatewayHost, current.Status.WorkspaceBaseDomain, v2alpha1.ClusterPhaseInactive, disabledMessage)
		if err != nil {
			return res, err
		}
		// Refresh the v1 object after the status write; the error is deliberately
		// ignored because the reconcile result no longer depends on it.
		currentV1 = &checlusterv1.CheCluster{}
		_ = r.client.Get(ctx, req.NamespacedName, currentV1)
		return res, nil
	}

	finalizerUpdated, err := r.ensureFinalizer(ctx, current)
	if err != nil {
		log.Info("Failed to set a finalizer", "object", req.String())
		return ctrl.Result{}, err
	} else if finalizerUpdated {
		// we've updated the object with a new finalizer, so we will enter another reconciliation loop shortly
		// we don't add the manager into the shared map just yet, because we have actually not reconciled it fully.
		return ctrl.Result{}, nil
	}

	// validate the CR
	err = r.validate(current)
	if err != nil {
		log.Info("validation errors", "errors", err.Error())
		res, err := r.updateStatus(ctx, current, currentV1, nil, current.Status.GatewayHost, current.Status.WorkspaceBaseDomain, v2alpha1.ClusterPhaseInactive, err.Error())
		if err != nil {
			return res, err
		}
		return res, nil
	}

	// now, finally, the actual reconciliation
	var changed bool
	var host string

	// We are no longer in charge of the gateway, leaving the responsibility for managing it on the che-operator.
	// But we need to detect the hostname on which the gateway is exposed so that the rest of our subsystems work.
	host, err = r.detectCheHost(ctx, currentV1)
	if err != nil {
		return ctrl.Result{}, err
	}

	// setting changed to false, because we jump from inactive directly to established, because we are no longer in
	// control of gateway creation
	changed = false

	workspaceBaseDomain := current.Spec.WorkspaceDomainEndpoints.BaseDomain

	if workspaceBaseDomain == "" {
		// Not configured explicitly - try to derive it from an OpenShift route.
		workspaceBaseDomain, err = r.detectOpenShiftRouteBaseDomain(current)
		if err != nil {
			return ctrl.Result{}, err
		}

		if workspaceBaseDomain == "" {
			// Auto-detection only works on OpenShift; on Kubernetes the user must set it.
			res, err := r.updateStatus(ctx, current, currentV1, nil, current.Status.GatewayHost, current.Status.WorkspaceBaseDomain, v2alpha1.ClusterPhaseInactive, "Could not auto-detect the workspaceBaseDomain. Please set it explicitly in the spec.")
			if err != nil {
				return res, err
			}
			return res, nil
		}
	}

	res, err := r.updateStatus(ctx, current, currentV1, &changed, host, workspaceBaseDomain, v2alpha1.ClusterPhaseActive, "")
	if err != nil {
		return res, err
	}

	// everything went fine and the manager exists, put it back in the shared map
	currentCheInstances[req.NamespacedName] = *current

	return res, nil
}
// updateStatus records the provided phase, host and workspace domain in the cluster
// status and persists it on the v1 resource if anything actually changed. The gateway
// phase is only touched when `changed` is non-nil. The returned result requests a
// requeue while the cluster is enabled but has not yet become fully active.
func (r *CheClusterReconciler) updateStatus(ctx context.Context, cluster *v2alpha1.CheCluster, v1Cluster *checlusterv1.CheCluster, changed *bool, host string, workspaceDomain string, phase v2alpha1.ClusterPhase, phaseMessage string) (ctrl.Result, error) {
	previousGatewayPhase := cluster.Status.GatewayPhase

	if changed != nil {
		switch {
		case !cluster.Spec.Gateway.IsEnabled():
			cluster.Status.GatewayPhase = v2alpha1.GatewayPhaseInactive
		case *changed:
			cluster.Status.GatewayPhase = v2alpha1.GatewayPhaseInitializing
		default:
			cluster.Status.GatewayPhase = v2alpha1.GatewayPhaseEstablished
		}
	}

	cluster.Status.GatewayHost = host
	cluster.Status.WorkspaceBaseDomain = workspaceDomain

	// set this unconditionally, because the only other value is set using the finalizer
	cluster.Status.Phase = phase
	cluster.Status.Message = phaseMessage

	// Only hit the API server when the devworkspace status actually differs.
	var err error
	if !reflect.DeepEqual(v1Cluster.Status.DevworkspaceStatus, cluster.Status) {
		v1Cluster.Status.DevworkspaceStatus = cluster.Status
		err = r.client.Status().Update(ctx, v1Cluster)
	}

	stillSettling := previousGatewayPhase == v2alpha1.GatewayPhaseInitializing ||
		cluster.Status.Phase != v2alpha1.ClusterPhaseActive
	requeue := cluster.Spec.IsEnabled() && stillSettling

	return ctrl.Result{Requeue: requeue}, err
}
// validate checks the CheCluster spec for problems this controller cannot work around.
// On Kubernetes (i.e. outside OpenShift) the workspace base domain cannot be
// auto-detected and must therefore be set explicitly.
func (r *CheClusterReconciler) validate(cluster *v2alpha1.CheCluster) error {
	var problems []string

	if !infrastructure.IsOpenShift() {
		// The validation error messages must correspond to the storage version of the resource, which is currently
		// v1...
		if cluster.Spec.WorkspaceDomainEndpoints.BaseDomain == "" {
			problems = append(problems, "spec.k8s.ingressDomain must be specified")
		}
	}

	if len(problems) == 0 {
		return nil
	}

	// Produces exactly the same message as a "- <problem>\n" line per entry.
	return stdErrors.New("The following validation errors were detected:\n- " + strings.Join(problems, "\n- ") + "\n")
}
// finalize runs the finalization logic for a cluster that is being deleted. If the
// gateway config check refuses finalization (there are still devworkspaces using the
// gateway), the failure is recorded in the status; otherwise our finalizer is removed
// so that deletion can proceed.
func (r *CheClusterReconciler) finalize(ctx context.Context, cluster *v2alpha1.CheCluster, v1Cluster *checlusterv1.CheCluster) (err error) {
	if err = r.gatewayConfigFinalize(ctx, cluster); err != nil {
		// Finalization is blocked - surface the reason in the status and return
		// the outcome of the status update.
		cluster.Status.Phase = v2alpha1.ClusterPhasePendingDeletion
		cluster.Status.Message = fmt.Sprintf("Finalization has failed: %s", err.Error())
		v1Cluster.Status.DevworkspaceStatus = cluster.Status
		return r.client.Status().Update(ctx, v1Cluster)
	}

	// Drop our finalizer from the list, keeping any foreign finalizers intact.
	remaining := []string{}
	for _, finalizer := range cluster.Finalizers {
		if finalizer != FinalizerName {
			remaining = append(remaining, finalizer)
		}
	}
	cluster.Finalizers = remaining

	return r.client.Update(ctx, checluster.AsV1(cluster))
}
// ensureFinalizer adds our finalizer to the cluster if it is not present yet.
// It reports whether the object was updated (in which case a new reconcile loop
// will be triggered shortly) together with any update error.
func (r *CheClusterReconciler) ensureFinalizer(ctx context.Context, cluster *v2alpha1.CheCluster) (updated bool, err error) {
	for _, finalizer := range cluster.Finalizers {
		if finalizer == FinalizerName {
			// Already present - nothing to do.
			return false, nil
		}
	}

	// append handles a nil Finalizers slice transparently.
	cluster.Finalizers = append(cluster.Finalizers, FinalizerName)
	err = r.client.Update(ctx, checluster.AsV1(cluster))
	return true, err
}
// Tries to autodetect the route base domain by creating a throw-away route and
// inspecting the host that OpenShift assigns to it. Returns "" on non-OpenShift
// infrastructure.
func (r *CheClusterReconciler) detectOpenShiftRouteBaseDomain(cluster *v2alpha1.CheCluster) (string, error) {
	if !infrastructure.IsOpenShift() {
		return "", nil
	}

	probeName := "devworkspace-che-test-" + randomSuffix(8)
	probe := &routev1.Route{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: cluster.Namespace,
			Name:      probeName,
		},
		Spec: routev1.RouteSpec{
			To: routev1.RouteTargetReference{
				Kind: "Service",
				Name: probeName,
			},
		},
	}

	if err := r.client.Create(context.TODO(), probe); err != nil {
		return "", err
	}
	// Best-effort cleanup: the route exists only so we can read its assigned host.
	defer r.client.Delete(context.TODO(), probe)

	// OpenShift assigns "<name>-<namespace>.<base domain>"; strip the prefix to
	// obtain the base domain.
	return strings.TrimPrefix(probe.Spec.Host, probeName+"-"+cluster.Namespace+"."), nil
}
// randomSuffix returns a pseudo-random lowercase hexadecimal string of exactly the
// requested length. It is used to build unique, throw-away resource names.
//
// Bug fixed: the previous version encoded a rounded-up byte count without trimming,
// so an odd `length` produced length+1 characters. It now always returns exactly
// `length` characters, and returns "" for non-positive lengths.
func randomSuffix(length int) string {
	if length <= 0 {
		return ""
	}

	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))

	// hex encoding emits 2 characters per byte, so round the byte count up...
	arr := make([]byte, (length+1)/2)
	rnd.Read(arr)

	// ...and trim the encoded string back to the exact requested length.
	return hex.EncodeToString(arr)[:length]
}
// detectCheHost determines the externally visible hostname of the Che gateway.
// If the spec sets CheHost explicitly, that value wins; otherwise the host is read
// from the Route (OpenShift) or Ingress (Kubernetes) that carries the Che gateway
// labels. An error is returned when no matching Route/Ingress can be found.
func (r *CheClusterReconciler) detectCheHost(ctx context.Context, cluster *checlusterv1.CheCluster) (string, error) {
	host := cluster.Spec.Server.CheHost

	if host == "" {
		// Find the gateway's Route/Ingress by the labels che-operator puts on it.
		expectedLabels := deploy.GetLabels(cluster, deploy.DefaultCheFlavor(cluster))
		lbls := labels.SelectorFromSet(expectedLabels)

		// util.IsOpenShift is a package-level flag, unlike infrastructure.IsOpenShift()
		// used elsewhere in this file.
		if util.IsOpenShift {
			list := routev1.RouteList{}
			err := r.client.List(ctx, &list, &client.ListOptions{
				Namespace:     cluster.Namespace,
				LabelSelector: lbls,
			})
			if err != nil {
				return "", err
			}

			if len(list.Items) == 0 {
				return "", fmt.Errorf("expecting exactly 1 route to match Che gateway labels but found %d", len(list.Items))
			}

			// Multiple matches are tolerated; the first route's host is used.
			host = list.Items[0].Spec.Host
		} else {
			list := v1beta1.IngressList{}
			err := r.client.List(ctx, &list, &client.ListOptions{
				Namespace:     cluster.Namespace,
				LabelSelector: lbls,
			})
			if err != nil {
				return "", err
			}

			if len(list.Items) == 0 {
				return "", fmt.Errorf("expecting exactly 1 ingress to match Che gateway labels but found %d", len(list.Items))
			}

			// The gateway ingress is expected to carry exactly one rule whose host
			// is the gateway host.
			if len(list.Items[0].Spec.Rules) != 1 {
				return "", fmt.Errorf("expecting exactly 1 rule on the Che gateway ingress but found %d. This is a bug", len(list.Items[0].Spec.Rules))
			}

			host = list.Items[0].Spec.Rules[0].Host
		}
	}

	return host, nil
}
// Checks that there are no devworkspace configurations for the gateway (which would
// mean running devworkspaces). If there are some, an error is returned.
func (r *CheClusterReconciler) gatewayConfigFinalize(ctx context.Context, cluster *v2alpha1.CheCluster) error {
	// Running devworkspaces are detected by the presence of gateway configmaps
	// in the namespace of the manager.
	configMaps := corev1.ConfigMapList{}
	err := r.client.List(ctx, &configMaps, &client.ListOptions{
		Namespace:     cluster.Namespace,
		LabelSelector: labels.SelectorFromSet(defaults.GetLabelsForComponent(cluster, "gateway-config")),
	})
	if err != nil {
		return err
	}

	// Only count configmaps annotated as belonging to this particular manager.
	workspaceCount := 0
	for _, cm := range configMaps.Items {
		ours := cm.Annotations[defaults.ConfigAnnotationCheManagerName] == cluster.Name &&
			cm.Annotations[defaults.ConfigAnnotationCheManagerNamespace] == cluster.Namespace
		if ours {
			workspaceCount++
		}
	}

	if workspaceCount > 0 {
		return fmt.Errorf("there are %d devworkspaces associated with this Che manager", workspaceCount)
	}

	return nil
}

View File

@ -0,0 +1,576 @@
package devworkspace
import (
"context"
"os"
"testing"
"time"
dwo "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
checluster "github.com/eclipse-che/che-operator/api"
v1 "github.com/eclipse-che/che-operator/api/v1"
"github.com/eclipse-che/che-operator/api/v2alpha1"
"github.com/eclipse-che/che-operator/controllers/devworkspace/defaults"
"github.com/eclipse-che/che-operator/controllers/devworkspace/sync"
"github.com/eclipse-che/che-operator/pkg/deploy"
"github.com/eclipse-che/che-operator/pkg/util"
routev1 "github.com/openshift/api/route/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/api/node/v1alpha1"
rbac "k8s.io/api/rbac/v1"
"k8s.io/utils/pointer"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// createTestScheme builds a runtime scheme with every API group the reconciler
// touches registered, and forces infrastructure detection into Kubernetes mode.
func createTestScheme() *runtime.Scheme {
	infrastructure.InitializeForTesting(infrastructure.Kubernetes)

	scheme := runtime.NewScheme()
	registrations := []func(*runtime.Scheme) error{
		v1alpha1.AddToScheme,
		extensions.AddToScheme,
		corev1.AddToScheme,
		appsv1.AddToScheme,
		rbac.AddToScheme,
		routev1.AddToScheme,
		v1.AddToScheme,
		dwo.AddToScheme,
	}
	for _, register := range registrations {
		utilruntime.Must(register(scheme))
	}

	return scheme
}
// TestNoCustomResourceSharedWhenReconcilingNonExistent verifies that reconciling a
// CheCluster that does not exist never publishes anything into the shared instance
// map - neither on an empty cluster nor when an unrelated CheCluster is present.
func TestNoCustomResourceSharedWhenReconcilingNonExistent(t *testing.T) {
	// clear the map before the test
	for k := range currentCheInstances {
		delete(currentCheInstances, k)
	}

	managerName := "che"
	ns := "default"
	scheme := createTestScheme()
	// Fake client with no objects at all.
	cl := fake.NewFakeClientWithScheme(scheme)

	ctx := context.TODO()

	reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)}

	_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}})
	if err != nil {
		t.Fatalf("Failed to reconcile che manager with error: %s", err)
	}

	// there is nothing in our context, so the map should still be empty
	managers := GetCurrentCheClusterInstances()
	if len(managers) != 0 {
		t.Fatalf("There should have been no managers after a reconcile of a non-existent manager.")
	}

	// now add some manager and reconcile a non-existent one
	cl.Create(ctx, asV1(&v2alpha1.CheCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:       managerName + "-not-me",
			Namespace:  ns,
			Finalizers: []string{FinalizerName},
		},
		Spec: v2alpha1.CheClusterSpec{
			Gateway: v2alpha1.CheGatewaySpec{
				Host:    "over.the.rainbow",
				Enabled: pointer.BoolPtr(false),
			},
			WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{
				BaseDomain: "down.on.earth",
			},
		},
	}))

	// Reconcile the name that still doesn't exist - the unrelated manager above
	// must not leak into the shared map.
	_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}})
	if err != nil {
		t.Fatalf("Failed to reconcile che manager with error: %s", err)
	}

	managers = GetCurrentCheClusterInstances()
	if len(managers) != 0 {
		t.Fatalf("There should have been no managers after a reconcile of a non-existent manager.")
	}
}
// TestAddsCustomResourceToSharedMapOnCreate verifies that reconciling an existing
// CheCluster CR registers it in the shared map returned by
// GetCurrentCheClusterInstances.
func TestAddsCustomResourceToSharedMapOnCreate(t *testing.T) {
	// clear the map before the test
	for k := range currentCheInstances {
		delete(currentCheInstances, k)
	}

	managerName := "che"
	ns := "default"
	scheme := createTestScheme()
	// seed the fake client with a single v1 CheCluster (converted from v2alpha1)
	cl := fake.NewFakeClientWithScheme(scheme, asV1(&v2alpha1.CheCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:       managerName,
			Namespace:  ns,
			Finalizers: []string{FinalizerName},
		},
		Spec: v2alpha1.CheClusterSpec{
			Gateway: v2alpha1.CheGatewaySpec{
				Host:    "over.the.rainbow",
				Enabled: pointer.BoolPtr(false),
			},
			WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{
				BaseDomain: "down.on.earth",
			},
		},
	}))

	reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)}

	_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}})
	if err != nil {
		t.Fatalf("Failed to reconcile che manager with error: %s", err)
	}

	// the reconcile of an existing CR must have registered exactly that CR
	managers := GetCurrentCheClusterInstances()
	if len(managers) != 1 {
		t.Fatalf("There should have been exactly 1 manager after a reconcile but there is %d.", len(managers))
	}

	mgr, ok := managers[types.NamespacedName{Name: managerName, Namespace: ns}]
	if !ok {
		t.Fatalf("The map of the current managers doesn't contain the expected one.")
	}

	if mgr.Name != managerName {
		t.Fatalf("Found a manager that we didn't reconcile. Curious (and buggy). We found %s but should have found %s", mgr.Name, managerName)
	}
}
// TestUpdatesCustomResourceInSharedMapOnUpdate verifies that an update of the
// CheCluster CR is reflected in the shared map only after the next reconcile —
// the map keeps the old value until the reconciler runs again.
func TestUpdatesCustomResourceInSharedMapOnUpdate(t *testing.T) {
	// clear the map before the test
	for k := range currentCheInstances {
		delete(currentCheInstances, k)
	}

	managerName := "che"
	ns := "default"
	scheme := createTestScheme()

	cl := fake.NewFakeClientWithScheme(scheme, asV1(&v2alpha1.CheCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:       managerName,
			Namespace:  ns,
			Finalizers: []string{FinalizerName},
		},
		Spec: v2alpha1.CheClusterSpec{
			Gateway: v2alpha1.CheGatewaySpec{
				Enabled: pointer.BoolPtr(false),
				Host:    "over.the.rainbow",
			},
			WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{
				BaseDomain: "down.on.earth",
			},
		},
	}))

	reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)}

	_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}})
	if err != nil {
		t.Fatalf("Failed to reconcile che manager with error: %s", err)
	}

	managers := GetCurrentCheClusterInstances()
	if len(managers) != 1 {
		t.Fatalf("There should have been exactly 1 manager after a reconcile but there is %d.", len(managers))
	}

	mgr, ok := managers[types.NamespacedName{Name: managerName, Namespace: ns}]
	if !ok {
		t.Fatalf("The map of the current managers doesn't contain the expected one.")
	}

	if mgr.Name != managerName {
		t.Fatalf("Found a manager that we didn't reconcile. Curious (and buggy). We found %s but should have found %s", mgr.Name, managerName)
	}

	if mgr.Spec.Gateway.Host != "over.the.rainbow" {
		t.Fatalf("Unexpected host value: expected: over.the.rainbow, actual: %s", mgr.Spec.Gateway.Host)
	}

	// now update the manager and reconcile again. See that the map contains the updated value
	mgrInCluster := v1.CheCluster{}
	// the Get error used to be silently ignored; a failed read would leave
	// mgrInCluster zero-valued and make the resource-version dance meaningless
	if err := cl.Get(context.TODO(), client.ObjectKey{Name: managerName, Namespace: ns}, &mgrInCluster); err != nil {
		t.Fatalf("Failed to read the manager from the fake client: %s", err)
	}

	// to be able to update, we need to set the resource version
	mgr.SetResourceVersion(mgrInCluster.GetResourceVersion())
	mgr.Spec.Gateway.Host = "over.the.shoulder"
	err = cl.Update(context.TODO(), asV1(&mgr))
	if err != nil {
		t.Fatalf("Failed to update. Wat? %s", err)
	}

	// before the reconcile, the map still should contain the old value
	managers = GetCurrentCheClusterInstances()
	mgr, ok = managers[types.NamespacedName{Name: managerName, Namespace: ns}]
	if !ok {
		t.Fatalf("The map of the current managers doesn't contain the expected one.")
	}

	if mgr.Name != managerName {
		t.Fatalf("Found a manager that we didn't reconcile. Curious (and buggy). We found %s but should have found %s", mgr.Name, managerName)
	}

	if mgr.Spec.Gateway.Host != "over.the.rainbow" {
		t.Fatalf("Unexpected host value: expected: over.the.rainbow, actual: %s", mgr.Spec.Gateway.Host)
	}

	// now reconcile and see that the value in the map is now updated
	_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}})
	if err != nil {
		t.Fatalf("Failed to reconcile che manager with error: %s", err)
	}

	managers = GetCurrentCheClusterInstances()
	mgr, ok = managers[types.NamespacedName{Name: managerName, Namespace: ns}]
	if !ok {
		t.Fatalf("The map of the current managers doesn't contain the expected one.")
	}

	if mgr.Name != managerName {
		t.Fatalf("Found a manager that we didn't reconcile. Curious (and buggy). We found %s but should have found %s", mgr.Name, managerName)
	}

	if mgr.Spec.Gateway.Host != "over.the.shoulder" {
		t.Fatalf("Unexpected host value: expected: over.the.shoulder, actual: %s", mgr.Spec.Gateway.Host)
	}
}
// TestRemovesCustomResourceFromSharedMapOnDelete verifies that after the CR is
// deleted, the next reconcile removes it from the shared map of current instances.
func TestRemovesCustomResourceFromSharedMapOnDelete(t *testing.T) {
	// clear the map before the test
	for k := range currentCheInstances {
		delete(currentCheInstances, k)
	}

	managerName := "che"
	ns := "default"
	scheme := createTestScheme()

	cl := fake.NewFakeClientWithScheme(scheme, asV1(&v2alpha1.CheCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:       managerName,
			Namespace:  ns,
			Finalizers: []string{FinalizerName},
		},
		Spec: v2alpha1.CheClusterSpec{
			Gateway: v2alpha1.CheGatewaySpec{
				Host:    "over.the.rainbow",
				Enabled: pointer.BoolPtr(false),
			},
			WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{
				BaseDomain: "down.on.earth",
			},
		},
	}))

	reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)}

	_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}})
	if err != nil {
		t.Fatalf("Failed to reconcile che manager with error: %s", err)
	}

	managers := GetCurrentCheClusterInstances()
	if len(managers) != 1 {
		t.Fatalf("There should have been exactly 1 manager after a reconcile but there is %d.", len(managers))
	}

	mgr, ok := managers[types.NamespacedName{Name: managerName, Namespace: ns}]
	if !ok {
		t.Fatalf("The map of the current managers doesn't contain the expected one.")
	}

	if mgr.Name != managerName {
		t.Fatalf("Found a manager that we didn't reconcile. Curious (and buggy). We found %s but should have found %s", mgr.Name, managerName)
	}

	// the Delete error used to be silently ignored; if the delete failed, the
	// assertion below would be testing nothing
	if err := cl.Delete(context.TODO(), asV1(&mgr)); err != nil {
		t.Fatalf("Failed to delete the manager: %s", err)
	}

	// now reconcile and see that the value is no longer in the map
	_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}})
	if err != nil {
		t.Fatalf("Failed to reconcile che manager with error: %s", err)
	}

	managers = GetCurrentCheClusterInstances()
	_, ok = managers[types.NamespacedName{Name: managerName, Namespace: ns}]
	if ok {
		t.Fatalf("The map of the current managers should no longer contain the manager after it has been deleted.")
	}
}
// TestCustomResourceFinalization verifies the finalization flow: the first
// reconcile keeps/adds the finalizer, a deletion attempt is blocked while a
// workspace gateway config map annotated with the manager still exists (the
// manager moves to the pending-deletion phase with a message), and once the
// config map is removed the finalizer is cleared.
func TestCustomResourceFinalization(t *testing.T) {
	managerName := "che"
	ns := "default"
	scheme := createTestScheme()
	ctx := context.TODO()
	// seed a manager plus a gateway config map annotated as belonging to it
	cl := fake.NewFakeClientWithScheme(scheme,
		asV1(&v2alpha1.CheCluster{
			ObjectMeta: metav1.ObjectMeta{
				Name:       managerName,
				Namespace:  ns,
				Finalizers: []string{FinalizerName},
			},
			Spec: v2alpha1.CheClusterSpec{
				Gateway: v2alpha1.CheGatewaySpec{
					Host: "over.the.rainbow",
				},
				WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{
					BaseDomain: "down.on.earth",
				},
			},
		}),
		&corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "ws1",
				Namespace: ns,
				Annotations: map[string]string{
					defaults.ConfigAnnotationCheManagerName:      managerName,
					defaults.ConfigAnnotationCheManagerNamespace: ns,
				},
				Labels: defaults.GetLabelsFromNames(managerName, "gateway-config"),
			},
		})

	reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)}

	_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}})
	if err != nil {
		t.Fatalf("Failed to reconcile che manager with error: %s", err)
	}

	// check that the reconcile loop added the finalizer
	manager := v1.CheCluster{}
	err = cl.Get(ctx, client.ObjectKey{Name: managerName, Namespace: ns}, &manager)
	if err != nil {
		t.Fatalf("Failed to obtain the manager from the fake client: %s", err)
	}

	if len(manager.Finalizers) != 1 {
		t.Fatalf("Expected a single finalizer on the manager but found: %d", len(manager.Finalizers))
	}

	if manager.Finalizers[0] != FinalizerName {
		t.Fatalf("Expected a finalizer called %s but got %s", FinalizerName, manager.Finalizers[0])
	}

	// try to delete the manager and check that the configmap disallows that and that the status of the manager is updated
	manager.DeletionTimestamp = &metav1.Time{Time: time.Now()}
	err = cl.Update(ctx, &manager)
	if err != nil {
		t.Fatalf("Failed to update the manager in the fake client: %s", err)
	}

	_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}})
	if err != nil {
		t.Fatalf("Failed to reconcile che manager with error: %s", err)
	}

	manager = v1.CheCluster{}
	err = cl.Get(ctx, client.ObjectKey{Name: managerName, Namespace: ns}, &manager)
	if err != nil {
		t.Fatalf("Failed to obtain the manager from the fake client: %s", err)
	}

	// the finalizer must survive the failed finalization attempt
	if len(manager.Finalizers) != 1 {
		t.Fatalf("There should have been a finalizer on the manager after a failed finalization attempt")
	}

	if manager.Status.DevworkspaceStatus.Phase != v2alpha1.ClusterPhasePendingDeletion {
		t.Fatalf("Expected the manager to be in the pending deletion phase but it is: %s", manager.Status.DevworkspaceStatus.Phase)
	}
	if len(manager.Status.DevworkspaceStatus.Message) == 0 {
		t.Fatalf("Expected an non-empty message about the failed finalization in the manager status")
	}

	// now remove the config map and check that the finalization proceeds
	err = cl.Delete(ctx, &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "ws1",
			Namespace: ns,
		},
	})
	if err != nil {
		t.Fatalf("Failed to delete the test configmap: %s", err)
	}

	_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}})
	if err != nil {
		t.Fatalf("Failed to reconcile che manager with error: %s", err)
	}

	manager = v1.CheCluster{}
	err = cl.Get(ctx, client.ObjectKey{Name: managerName, Namespace: ns}, &manager)
	if err != nil {
		t.Fatalf("Failed to obtain the manager from the fake client: %s", err)
	}

	if len(manager.Finalizers) != 0 {
		t.Fatalf("The finalizers should be cleared after the finalization success but there were still some: %d", len(manager.Finalizers))
	}
}
// This test should be removed if we are again in charge of gateway creation.
//
// TestExternalGatewayDetection checks that the reconciler picks up the gateway
// host from a pre-existing labeled ingress (on Kubernetes) or route (on
// OpenShift) and reports it in the cluster status together with the active phase.
func TestExternalGatewayDetection(t *testing.T) {
	// pin CHE_FLAVOR to a known value for the duration of the test; the same
	// value is passed to deploy.GetLabels below so the seeded objects match
	origFlavor := os.Getenv("CHE_FLAVOR")
	t.Cleanup(func() {
		os.Setenv("CHE_FLAVOR", origFlavor)
	})

	os.Setenv("CHE_FLAVOR", "test-che")

	scheme := createTestScheme()

	clusterName := "eclipse-che"
	ns := "default"

	v2cluster := &v2alpha1.CheCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterName,
			Namespace: ns,
		},
		Spec: v2alpha1.CheClusterSpec{
			WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{
				BaseDomain: "down.on.earth",
			},
		},
	}

	onKubernetes(func() {
		v1Cluster := asV1(v2cluster)

		cl := fake.NewFakeClientWithScheme(scheme,
			v1Cluster,
			&extensions.Ingress{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "ingress",
					Namespace: ns,
					Labels:    deploy.GetLabels(v1Cluster, "test-che"),
				},
				Spec: extensions.IngressSpec{
					Rules: []extensions.IngressRule{
						{
							Host: "ingress.host",
						},
					},
				},
			},
		)

		reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)}

		// first reconcile sets the finalizer, second reconcile actually finishes the process
		_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: clusterName, Namespace: ns}})
		if err != nil {
			t.Fatalf("Failed to reconcile che manager with error: %s", err)
		}
		_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: clusterName, Namespace: ns}})
		if err != nil {
			t.Fatalf("Failed to reconcile che manager with error: %s", err)
		}

		persisted := v1.CheCluster{}
		if err := cl.Get(context.TODO(), types.NamespacedName{Name: clusterName, Namespace: ns}, &persisted); err != nil {
			t.Fatal(err)
		}

		if persisted.Status.DevworkspaceStatus.Phase != v2alpha1.ClusterPhaseActive {
			t.Fatalf("Unexpected cluster state: %v", persisted.Status.DevworkspaceStatus.Phase)
		}

		// the host of the seeded ingress must be detected as the gateway host
		if persisted.Status.DevworkspaceStatus.GatewayHost != "ingress.host" {
			t.Fatalf("Unexpected gateway host: %v", persisted.Status.DevworkspaceStatus.GatewayHost)
		}
	})

	onOpenShift(func() {
		v1Cluster := asV1(v2cluster)

		cl := fake.NewFakeClientWithScheme(scheme,
			v1Cluster,
			&routev1.Route{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "route",
					Namespace: ns,
					Labels:    deploy.GetLabels(v1Cluster, "test-che"),
				},
				Spec: routev1.RouteSpec{
					Host: "route.host",
				},
			},
		)

		reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)}

		// first reconcile sets the finalizer, second reconcile actually finishes the process
		_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: clusterName, Namespace: ns}})
		if err != nil {
			t.Fatalf("Failed to reconcile che manager with error: %s", err)
		}
		_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: clusterName, Namespace: ns}})
		if err != nil {
			t.Fatalf("Failed to reconcile che manager with error: %s", err)
		}

		persisted := v1.CheCluster{}
		if err := cl.Get(context.TODO(), types.NamespacedName{Name: clusterName, Namespace: ns}, &persisted); err != nil {
			t.Fatal(err)
		}

		if persisted.Status.DevworkspaceStatus.Phase != v2alpha1.ClusterPhaseActive {
			t.Fatalf("Unexpected cluster state: %v", persisted.Status.DevworkspaceStatus.Phase)
		}

		// the host of the seeded route must be detected as the gateway host
		if persisted.Status.DevworkspaceStatus.GatewayHost != "route.host" {
			t.Fatalf("Unexpected gateway host: %v", persisted.Status.DevworkspaceStatus.GatewayHost)
		}
	})
}
// asV1 converts a v2alpha1 CheCluster into its v1 representation using the
// shared conversion code.
func asV1(v2Obj *v2alpha1.CheCluster) *v1.CheCluster {
	return checluster.AsV1(v2Obj)
}
// onKubernetes runs f with the util.IsOpenShift* flags forced to the Kubernetes
// values, restoring the original values afterwards.
func onKubernetes(f func()) {
	isOpenShift := util.IsOpenShift
	isOpenShift4 := util.IsOpenShift4

	// restore in a defer: t.Fatal inside f exits via runtime.Goexit (and f may
	// panic), which would skip a plain trailing assignment and leak the
	// overridden globals into subsequent tests
	defer func() {
		util.IsOpenShift = isOpenShift
		util.IsOpenShift4 = isOpenShift4
	}()

	util.IsOpenShift = false
	util.IsOpenShift4 = false

	f()
}
// onOpenShift runs f with the util.IsOpenShift* flags forced to the OpenShift
// values, restoring the original values afterwards.
func onOpenShift(f func()) {
	isOpenShift := util.IsOpenShift
	isOpenShift4 := util.IsOpenShift4

	// restore in a defer: t.Fatal inside f exits via runtime.Goexit (and f may
	// panic), which would skip a plain trailing assignment and leak the
	// overridden globals into subsequent tests
	defer func() {
		util.IsOpenShift = isOpenShift
		util.IsOpenShift4 = isOpenShift4
	}()

	util.IsOpenShift = true
	util.IsOpenShift4 = true

	f()
}

View File

@ -0,0 +1,107 @@
package defaults
import (
"os"
"runtime"
"github.com/eclipse-che/che-operator/api/v2alpha1"
ctrl "sigs.k8s.io/controller-runtime"
)
const (
	// environment variables that override the default container images below
	gatewayImageEnvVarName           = "RELATED_IMAGE_gateway"
	gatewayConfigurerImageEnvVarName = "RELATED_IMAGE_gateway_configurer"

	// hardcoded fallback images used when the env vars above are not set
	defaultGatewayImage           = "quay.io/eclipse/che--traefik:v2.3.2-6e6d4dc5a19afe06778ca092cdbbb98e31cb9f9c313edafa23f81a0e6ddf8a23"
	defaultGatewayConfigurerImage = "quay.io/che-incubator/configbump:0.1.4"

	// annotations placed on exposure/config objects to link them back to the
	// owning Che manager, devworkspace routing, endpoint and component
	configAnnotationPrefix                       = "che.routing.controller.devfile.io/"
	ConfigAnnotationCheManagerName               = configAnnotationPrefix + "che-name"
	ConfigAnnotationCheManagerNamespace          = configAnnotationPrefix + "che-namespace"
	ConfigAnnotationDevWorkspaceRoutingName      = configAnnotationPrefix + "devworkspacerouting-name"
	ConfigAnnotationDevWorkspaceRoutingNamespace = configAnnotationPrefix + "devworkspacerouting-namespace"
	ConfigAnnotationEndpointName                 = configAnnotationPrefix + "endpoint-name"
	ConfigAnnotationComponentName                = configAnnotationPrefix + "component-name"
)
var (
	log = ctrl.Log.WithName("defaults")

	// DefaultIngressAnnotations are the nginx ingress annotations applied when
	// the cluster does not configure its own set.
	DefaultIngressAnnotations = map[string]string{
		"kubernetes.io/ingress.class":                       "nginx",
		"nginx.ingress.kubernetes.io/proxy-read-timeout":    "3600",
		"nginx.ingress.kubernetes.io/proxy-connect-timeout": "3600",
		"nginx.ingress.kubernetes.io/ssl-redirect":          "true",
	}

	// If this looks weirdly out of place to you from all other labels, then you're completely right!
	// These labels are the default ones used by che-operator and Che7. Let's keep the defaults
	// the same for the ease of translation...
	defaultGatewayConfigLabels = map[string]string{
		"app":       "che",
		"component": "che-gateway-config",
	}
)
// GetGatewayWorkpaceConfigMapName returns the name of the gateway config map
// devoted to the given workspace — currently the workspace ID itself.
// NOTE(review): the exported name is missing an "s" ("Workpace" instead of
// "Workspace"); kept as-is because callers depend on the exported identifier.
func GetGatewayWorkpaceConfigMapName(workspaceID string) string {
	return workspaceID
}
// GetLabelsForComponent returns the standard Kubernetes app labels for a
// component of the given Che cluster.
func GetLabelsForComponent(cluster *v2alpha1.CheCluster, component string) map[string]string {
	return GetLabelsFromNames(cluster.Name, component)
}
// GetLabelsFromNames produces a fresh map with the standard Kubernetes app
// labels for the given application name and component.
func GetLabelsFromNames(appName string, component string) map[string]string {
	fresh := map[string]string{}
	return AddStandardLabelsFromNames(appName, component, fresh)
}
// AddStandardLabelsForComponent adds the standard Kubernetes app labels for a
// component of the given cluster into the supplied label map (which is mutated
// and returned).
func AddStandardLabelsForComponent(cluster *v2alpha1.CheCluster, component string, labels map[string]string) map[string]string {
	return AddStandardLabelsFromNames(cluster.Name, component, labels)
}
// AddStandardLabelsFromNames merges the standard "app.kubernetes.io/*" labels
// for the given application name and component into the supplied map, mutating
// it in place and returning it. Pre-existing standard keys are overwritten,
// all other entries are kept.
func AddStandardLabelsFromNames(appName string, component string, labels map[string]string) map[string]string {
	standard := map[string]string{
		"app.kubernetes.io/name":      appName,
		"app.kubernetes.io/part-of":   appName,
		"app.kubernetes.io/component": component,
	}
	for key, value := range standard {
		labels[key] = value
	}
	return labels
}
// GetGatewayImage returns the gateway container image, read from the
// RELATED_IMAGE_gateway env var (or its GOARCH-suffixed variant) with a
// hardcoded fallback.
func GetGatewayImage() string {
	return read(gatewayImageEnvVarName, defaultGatewayImage)
}
// GetGatewayConfigurerImage returns the gateway configurer (configbump)
// container image, read from the RELATED_IMAGE_gateway_configurer env var (or
// its GOARCH-suffixed variant) with a hardcoded fallback.
func GetGatewayConfigurerImage() string {
	return read(gatewayConfigurerImageEnvVarName, defaultGatewayConfigurerImage)
}
// GetIngressAnnotations returns the ingress annotations configured on the
// cluster, falling back to DefaultIngressAnnotations when none are set.
func GetIngressAnnotations(cluster *v2alpha1.CheCluster) map[string]string {
	annotations := cluster.Spec.K8s.IngressAnnotations
	if len(annotations) == 0 {
		return DefaultIngressAnnotations
	}
	return annotations
}
// GetGatewayWorkspaceConfigMapLabels returns the labels configured on the
// cluster for the gateway config maps, falling back to the legacy
// defaultGatewayConfigLabels when none are set.
func GetGatewayWorkspaceConfigMapLabels(cluster *v2alpha1.CheCluster) map[string]string {
	configured := cluster.Spec.Gateway.ConfigLabels
	if len(configured) == 0 {
		return defaultGatewayConfigLabels
	}
	return configured
}
// read resolves a default value: first from the named environment variable,
// then from its architecture-suffixed variant, and finally from the supplied
// hardcoded fallback (logging when the fallback is used).
func read(varName string, fallback string) string {
	if val := os.Getenv(varName); len(val) != 0 {
		return val
	}
	if val := os.Getenv(archDependent(varName)); len(val) != 0 {
		return val
	}
	log.Info("Failed to read the default value from the environment. Will use the hardcoded default value.", "envvar", varName, "value", fallback)
	return fallback
}
func archDependent(envVarName string) string {
return envVarName + "_" + runtime.GOARCH
}

View File

@ -0,0 +1,519 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package solver
import (
"context"
"fmt"
"path"
"strings"
dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
dwo "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
"github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers"
"github.com/devfile/devworkspace-operator/pkg/common"
"github.com/devfile/devworkspace-operator/pkg/constants"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
"github.com/eclipse-che/che-operator/api/v2alpha1"
"github.com/eclipse-che/che-operator/controllers/devworkspace/defaults"
"github.com/eclipse-che/che-operator/controllers/devworkspace/sync"
"github.com/google/go-cmp/cmp/cmpopts"
routeV1 "github.com/openshift/api/route/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
)
const (
	// endpoint attributes recognized by the solver
	uniqueEndpointAttributeName              = "unique"
	urlRewriteSupportedEndpointAttributeName = "urlRewriteSupported"

	// gateway path prefix for non-unique endpoints: /<workspaceID>/<machine>/<port>
	endpointURLPrefixPattern = "/%s/%s/%d"
	// note - che-theia DEPENDS on this format - we should not change this unless crosschecked with the che-theia impl
	uniqueEndpointURLPrefixPattern = "/%s/%s/%s"
)

var (
	// when syncing gateway config maps, compare only the data — ignore metadata
	configMapDiffOpts = cmpopts.IgnoreFields(corev1.ConfigMap{}, "TypeMeta", "ObjectMeta")
)

// keys are port numbers, values are maps where keys are endpoint names (in case we need more than 1 endpoint for a single port) and values
// contain info about the intended endpoint scheme and the order in which the port is defined (used for unique naming)
type portMapping map[int32]map[string]portMappingValue

// portMappingValue captures the scheme the endpoint wants to be served with and
// the global definition order used to build unique object names.
type portMappingValue struct {
	endpointScheme string
	order          int
}
// cheSpecObjects builds the Kubernetes objects realizing the given
// DevWorkspaceRouting: the workspace services (labeled and annotated so they can
// be traced back to the owning Che manager) plus the gateway config maps, which
// are synced to the cluster directly rather than returned in the routing objects.
func (c *CheRoutingSolver) cheSpecObjects(cheManager *v2alpha1.CheCluster, routing *dwo.DevWorkspaceRouting, workspaceMeta solvers.DevWorkspaceMetadata) (solvers.RoutingObjects, error) {
	objs := solvers.RoutingObjects{}

	objs.Services = solvers.GetDiscoverableServicesForEndpoints(routing.Spec.Endpoints, workspaceMeta)

	// one common service covers the public/internal endpoints that are not discoverable
	commonService := solvers.GetServiceForEndpoints(routing.Spec.Endpoints, workspaceMeta, false, dw.PublicEndpointExposure, dw.InternalEndpointExposure)
	if commonService != nil {
		objs.Services = append(objs.Services, *commonService)
	}

	annos := map[string]string{}
	annos[defaults.ConfigAnnotationCheManagerName] = cheManager.Name
	annos[defaults.ConfigAnnotationCheManagerNamespace] = cheManager.Namespace

	additionalLabels := defaults.GetLabelsForComponent(cheManager, "exposure")

	// stamp every service with the manager labels/annotations without overwriting
	// values that are already present
	for i := range objs.Services {
		// need to use a ref otherwise s would be a copy
		s := &objs.Services[i]

		if s.Labels == nil {
			s.Labels = map[string]string{}
		}

		for k, v := range additionalLabels {
			if len(s.Labels[k]) == 0 {
				s.Labels[k] = v
			}
		}

		if s.Annotations == nil {
			s.Annotations = map[string]string{}
		}

		for k, v := range annos {
			if len(s.Annotations[k]) == 0 {
				s.Annotations[k] = v
			}
		}
	}

	// k, now we have to create our own objects for configuring the gateway
	configMaps, err := c.getGatewayConfigsAndFillRoutingObjects(cheManager, workspaceMeta.DevWorkspaceId, routing, &objs)
	if err != nil {
		return solvers.RoutingObjects{}, err
	}

	// the config maps are applied to the cluster here instead of being returned
	// as part of the routing objects
	syncer := sync.New(c.client, c.scheme)

	for _, cm := range configMaps {
		_, _, err := syncer.Sync(context.TODO(), nil, &cm, configMapDiffOpts)
		if err != nil {
			return solvers.RoutingObjects{}, err
		}
	}

	return objs, nil
}
// cheExposedEndpoints computes the externally visible URLs for all public
// endpoints of a workspace — either from a dedicated ingress/route or from the
// gateway host plus a path prefix. It returns ready=false while the gateway is
// still initializing or has not yet established a host.
func (c *CheRoutingSolver) cheExposedEndpoints(manager *v2alpha1.CheCluster, workspaceID string, endpoints map[string]dwo.EndpointList, routingObj solvers.RoutingObjects) (exposedEndpoints map[string]dwo.ExposedEndpointList, ready bool, err error) {
	if manager.Status.GatewayPhase == v2alpha1.GatewayPhaseInitializing {
		return nil, false, nil
	}

	gatewayHost := manager.Status.GatewayHost

	exposed := map[string]dwo.ExposedEndpointList{}

	for machineName, endpoints := range endpoints {
		exposedEndpoints := dwo.ExposedEndpointList{}

		for _, endpoint := range endpoints {
			if endpoint.Exposure != dw.PublicEndpointExposure {
				continue
			}

			scheme := determineEndpointScheme(manager.Spec.Gateway.IsEnabled(), endpoint)

			if !isExposableScheme(scheme) {
				// we cannot expose non-http endpoints publicly, because ingresses/routes only support http(s)
				continue
			}

			// try to find the endpoint in the ingresses/routes first. If it is there, it is exposed on a subdomain
			// otherwise it is exposed through the gateway
			var endpointURL string
			if infrastructure.IsOpenShift() {
				route := findRouteForEndpoint(machineName, endpoint, &routingObj)
				if route != nil {
					endpointURL = path.Join(route.Spec.Host, endpoint.Path)
				}
			} else {
				ingress := findIngressForEndpoint(machineName, endpoint, &routingObj)
				if ingress != nil {
					endpointURL = path.Join(ingress.Spec.Rules[0].Host, endpoint.Path)
				}
			}

			if endpointURL == "" {
				// not exposed on its own ingress/route, so it must go through the gateway
				if !manager.Spec.Gateway.IsEnabled() {
					return map[string]dwo.ExposedEndpointList{}, false, fmt.Errorf("couldn't find an ingress/route for an endpoint `%s` in workspace `%s`, this is a bug", endpoint.Name, workspaceID)
				}

				if gatewayHost == "" {
					// the gateway has not yet established the host
					return map[string]dwo.ExposedEndpointList{}, false, nil
				}

				publicURLPrefix := getPublicURLPrefixForEndpoint(workspaceID, machineName, endpoint)
				endpointURL = path.Join(gatewayHost, publicURLPrefix, endpoint.Path)
			}

			publicURL := scheme + "://" + endpointURL

			// path.Join() removes the trailing slashes, so make sure to reintroduce that if required.
			if endpoint.Path == "" || strings.HasSuffix(endpoint.Path, "/") {
				publicURL = publicURL + "/"
			}

			exposedEndpoints = append(exposedEndpoints, dwo.ExposedEndpoint{
				Name:       endpoint.Name,
				Url:        publicURL,
				Attributes: endpoint.Attributes,
			})
		}
		exposed[machineName] = exposedEndpoints
	}

	return exposed, true, nil
}
// isExposableScheme tells whether the endpoint scheme can be served through an
// ingress/route, i.e. whether it is an http(s) or ws(s) variant.
func isExposableScheme(scheme string) bool {
	for _, exposablePrefix := range []string{"http", "ws"} {
		if strings.HasPrefix(scheme, exposablePrefix) {
			return true
		}
	}
	return false
}
// secureScheme maps a plain scheme to its TLS-secured counterpart; schemes
// without a known secure variant are returned unchanged.
func secureScheme(scheme string) string {
	switch scheme {
	case "http":
		return "https"
	case "ws":
		return "wss"
	default:
		return scheme
	}
}
// isSecureScheme tells whether the scheme is one of the TLS-secured variants.
func isSecureScheme(scheme string) bool {
	switch scheme {
	case "https", "wss":
		return true
	default:
		return false
	}
}
// getGatewayConfigsAndFillRoutingObjects builds the traefik gateway
// configuration for the workspace and fills the routing objects with the routes
// (OpenShift) or ingresses (Kubernetes) for the endpoints that cannot be served
// behind the gateway. It returns the config map carrying the gateway
// configuration, or an empty slice when the gateway has nothing to route.
func (c *CheRoutingSolver) getGatewayConfigsAndFillRoutingObjects(cheManager *v2alpha1.CheCluster, workspaceID string, routing *dwo.DevWorkspaceRouting, objs *solvers.RoutingObjects) ([]corev1.ConfigMap, error) {
	restrictedAnno, setRestrictedAnno := routing.Annotations[constants.DevWorkspaceRestrictedAccessAnnotation]

	// the config map carries the workspace ID (and the restricted-access marker,
	// if present on the routing) in its labels
	labels := defaults.AddStandardLabelsForComponent(cheManager, "gateway-config", defaults.GetGatewayWorkspaceConfigMapLabels(cheManager))
	labels[constants.DevWorkspaceIDLabel] = workspaceID
	if setRestrictedAnno {
		labels[constants.DevWorkspaceRestrictedAccessAnnotation] = restrictedAnno
	}

	configMap := corev1.ConfigMap{
		ObjectMeta: v1.ObjectMeta{
			Name:      defaults.GetGatewayWorkpaceConfigMapName(workspaceID),
			Namespace: cheManager.Namespace,
			Labels:    labels,
			Annotations: map[string]string{
				defaults.ConfigAnnotationDevWorkspaceRoutingName:      routing.Name,
				defaults.ConfigAnnotationDevWorkspaceRoutingNamespace: routing.Namespace,
			},
		},
		Data: map[string]string{},
	}

	config := traefikConfig{
		HTTP: traefikConfigHTTP{
			Routers:     map[string]traefikConfigRouter{},
			Services:    map[string]traefikConfigService{},
			Middlewares: map[string]traefikConfigMiddleware{},
		},
	}

	// we just need something to make the route names unique.. We also need to make the names as short as possible while
	// being relatable to the workspaceID by mere human inspection. So let's just suffix the workspaceID with a "unique"
	// suffix, the easiest of which is the iteration order in the map.
	// Note that this means that the endpoints might get a different route/ingress name on each workspace start because
	// the iteration order is not guaranteed in Go maps. If we want stable ingress/route names for the endpoints, we need
	// to devise a different algorithm to produce them. Some kind of hash of workspaceID, component name, endpoint name and port
	// might work but will not be relatable to the workspace ID just by looking at it anymore.
	order := 0
	if infrastructure.IsOpenShift() {
		exposer := &RouteExposer{}
		if err := exposer.initFrom(context.TODO(), c.client, cheManager, routing); err != nil {
			return []corev1.ConfigMap{}, err
		}

		exposeAllEndpoints(&order, cheManager, routing, &config, objs, func(info *EndpointInfo) {
			route := exposer.getRouteForService(info)
			objs.Routes = append(objs.Routes, route)
		})
	} else {
		exposer := &IngressExposer{}
		if err := exposer.initFrom(context.TODO(), c.client, cheManager, routing, defaults.GetIngressAnnotations(cheManager)); err != nil {
			return []corev1.ConfigMap{}, err
		}

		exposeAllEndpoints(&order, cheManager, routing, &config, objs, func(info *EndpointInfo) {
			ingress := exposer.getIngressForService(info)
			objs.Ingresses = append(objs.Ingresses, ingress)
		})
	}

	// only emit the config map when the gateway actually has something to route
	if len(config.HTTP.Routers) > 0 {
		contents, err := yaml.Marshal(config)
		if err != nil {
			return []corev1.ConfigMap{}, err
		}
		configMap.Data[workspaceID+".yml"] = string(contents)

		return []corev1.ConfigMap{configMap}, nil
	}

	return []corev1.ConfigMap{}, nil
}
// exposeAllEndpoints walks all endpoints of the routing: the url-rewrite-capable
// ("singlehost") ports are added to the gateway traefik config, the remaining
// ("multihost") ports are handed to the callback to be exposed on dedicated
// ingresses/routes.
func exposeAllEndpoints(order *int, cheManager *v2alpha1.CheCluster, routing *dwo.DevWorkspaceRouting, config *traefikConfig, objs *solvers.RoutingObjects, ingressExpose func(*EndpointInfo)) {
	// NOTE(review): a single EndpointInfo instance is reused for every endpoint;
	// the callback must consume it immediately and not retain the pointer
	info := &EndpointInfo{}
	for componentName, endpoints := range routing.Spec.Endpoints {
		info.componentName = componentName
		singlehostPorts, multihostPorts := classifyEndpoints(cheManager.Spec.Gateway.IsEnabled(), order, &endpoints)

		// singlehost ports are served behind the gateway on path prefixes
		addToTraefikConfig(routing.Namespace, routing.Spec.DevWorkspaceId, componentName, singlehostPorts, config)

		// multihost ports get their own ingress/route, one per (port, endpoint name)
		for port, names := range multihostPorts {
			backingService := findServiceForPort(port, objs)
			for endpointName, val := range names {
				info.endpointName = endpointName
				info.order = val.order
				info.port = port
				info.scheme = val.endpointScheme
				info.service = backingService

				ingressExpose(info)
			}
		}
	}
}
// getTrackedEndpointName returns the name under which the endpoint is tracked
// in the exposure objects: its own name for endpoints marked with the "unique"
// attribute, the empty string for everything else.
func getTrackedEndpointName(endpoint *dw.Endpoint) string {
	if endpoint.Attributes.GetString(uniqueEndpointAttributeName, nil) == "true" {
		return endpoint.Name
	}
	return ""
}
// we need to support unique endpoints - so 1 port can actually be accessible
// multiple times, each time using a different resulting external URL.
// non-unique endpoints are all represented using a single external URL
//
// classifyEndpoints splits the public endpoints into those that can be served
// behind the gateway on a path prefix (singlehost — requires the gateway to be
// enabled and the endpoint to support URL rewriting) and those needing a
// dedicated ingress/route (multihost). The shared order counter hands every
// newly seen (port, name) pair a stable index used for unique object naming.
func classifyEndpoints(gatewayEnabled bool, order *int, endpoints *dwo.EndpointList) (singlehostPorts portMapping, multihostPorts portMapping) {
	singlehostPorts = portMapping{}
	multihostPorts = portMapping{}
	for _, e := range *endpoints {
		if e.Exposure != dw.PublicEndpointExposure {
			continue
		}

		i := int32(e.TargetPort)

		// "unique" endpoints are tracked under their own name; all other
		// endpoints on the same port share the "" key
		name := ""
		if e.Attributes.GetString(uniqueEndpointAttributeName, nil) == "true" {
			name = e.Name
		}

		ports := multihostPorts
		if gatewayEnabled && e.Attributes.GetString(urlRewriteSupportedEndpointAttributeName, nil) == "true" {
			ports = singlehostPorts
		}

		if ports[i] == nil {
			ports[i] = map[string]portMappingValue{}
		}

		// only the first occurrence of a (port, name) pair claims an order index
		if _, ok := ports[i][name]; !ok {
			ports[i][name] = portMappingValue{
				order:          *order,
				endpointScheme: determineEndpointScheme(gatewayEnabled, e),
			}
			*order = *order + 1
		}
	}
	return
}
// addToTraefikConfig registers, for every (port, endpoint name) pair in the
// port mapping, a traefik router matching the public path prefix, a service
// pointing at the in-cluster workspace service, and a strip-prefix middleware
// removing the prefix before forwarding.
func addToTraefikConfig(namespace string, workspaceID string, machineName string, portMapping portMapping, cfg *traefikConfig) {
	rtrs := cfg.HTTP.Routers
	srvcs := cfg.HTTP.Services
	mdls := cfg.HTTP.Middlewares

	for port, names := range portMapping {
		for endpointName := range names {
			name := getEndpointExposingObjectName(machineName, workspaceID, port, endpointName)
			// the public prefix the gateway matches on...
			prefix := getPublicURLPrefix(workspaceID, machineName, port, endpointName)
			// ...and the in-cluster URL it forwards the stripped request to
			serviceURL := getServiceURL(port, workspaceID, namespace)

			rtrs[name] = traefikConfigRouter{
				Rule:        fmt.Sprintf("PathPrefix(`%s`)", prefix),
				Service:     name,
				Middlewares: []string{name},
				Priority:    100,
			}

			srvcs[name] = traefikConfigService{
				LoadBalancer: traefikConfigLoadbalancer{
					Servers: []traefikConfigLoadbalancerServer{
						{
							URL: serviceURL,
						},
					},
				},
			}

			mdls[name] = traefikConfigMiddleware{
				StripPrefix: traefikConfigStripPrefix{
					Prefixes: []string{prefix},
				},
			}
		}
	}
}
// findServiceForPort returns the first service in the routing objects that
// exposes the given port, or nil when none does.
func findServiceForPort(port int32, objs *solvers.RoutingObjects) *corev1.Service {
	for i := range objs.Services {
		svc := &objs.Services[i]
		for _, svcPort := range svc.Spec.Ports {
			if svcPort.Port == port {
				return svc
			}
		}
	}
	return nil
}
// findIngressForEndpoint returns the ingress from the routing objects that
// exposes the given endpoint of the given component — matched via the
// component/endpoint annotations plus a rule path whose backend port equals the
// endpoint's target port — or nil if there is none.
func findIngressForEndpoint(machineName string, endpoint dw.Endpoint, objs *solvers.RoutingObjects) *v1beta1.Ingress {
	for i := range objs.Ingresses {
		ingress := &objs.Ingresses[i]

		if ingress.Annotations[defaults.ConfigAnnotationComponentName] != machineName ||
			ingress.Annotations[defaults.ConfigAnnotationEndpointName] != getTrackedEndpointName(&endpoint) {
			continue
		}

		for r := range ingress.Spec.Rules {
			rule := ingress.Spec.Rules[r]
			for p := range rule.HTTP.Paths {
				path := rule.HTTP.Paths[p]
				if path.Backend.ServicePort.IntVal == int32(endpoint.TargetPort) {
					return ingress
				}
			}
		}
	}

	return nil
}
// findRouteForEndpoint returns the route among the routing objects that was generated for
// the given endpoint of the given component (machine), or nil when there is none.
func findRouteForEndpoint(machineName string, endpoint dw.Endpoint, objs *solvers.RoutingObjects) *routeV1.Route {
	service := findServiceForPort(int32(endpoint.TargetPort), objs)
	if service == nil {
		// no service exposes the endpoint's port, so no route can target it
		return nil
	}
	for r := range objs.Routes {
		route := &objs.Routes[r]
		if route.Annotations[defaults.ConfigAnnotationComponentName] == machineName &&
			route.Annotations[defaults.ConfigAnnotationEndpointName] == getTrackedEndpointName(&endpoint) &&
			route.Spec.To.Kind == "Service" &&
			route.Spec.To.Name == service.Name &&
			// Port is optional in the OpenShift route API - guard against nil
			route.Spec.Port != nil &&
			route.Spec.Port.TargetPort.IntValue() == endpoint.TargetPort {
			return route
		}
	}
	return nil
}
// cheRoutingFinalize cleans up the cluster objects created for the routing of a single
// devworkspace - it deletes all the configmaps labeled with the routing's workspace ID
// in the namespace of the che manager.
func (c *CheRoutingSolver) cheRoutingFinalize(cheManager *v2alpha1.CheCluster, routing *dwo.DevWorkspaceRouting) error {
	selector, err := labels.Parse(fmt.Sprintf("%s=%s", constants.DevWorkspaceIDLabel, routing.Spec.DevWorkspaceId))
	if err != nil {
		return err
	}
	configs := &corev1.ConfigMapList{}
	listOpts := &client.ListOptions{
		Namespace:     cheManager.Namespace,
		LabelSelector: selector,
	}
	if err = c.client.List(context.TODO(), configs, listOpts); err != nil {
		return err
	}
	// index into the slice rather than taking the address of the range variable - before
	// Go 1.22 the range variable is reused across iterations, and passing its address
	// around is a well-known aliasing hazard (gosec G601)
	for i := range configs.Items {
		if err = c.client.Delete(context.TODO(), &configs.Items[i]); err != nil {
			return err
		}
	}
	return nil
}
// getServiceURL composes the cluster-internal URL of the workspace service on the given
// port.
func getServiceURL(port int32, workspaceID string, workspaceNamespace string) string {
	// the default .cluster.local suffix of the internal domain names seems to be configurable,
	// so deliberately stop at the short "svc" form to avoid having to know about it
	host := fmt.Sprintf("%s.%s.svc", common.ServiceName(workspaceID), workspaceNamespace)
	return fmt.Sprintf("http://%s:%d", host, port)
}
// getPublicURLPrefixForEndpoint computes the public URL path prefix of the endpoint.
// The endpoint name is only used for endpoints marked with the "unique" attribute.
func getPublicURLPrefixForEndpoint(workspaceID string, machineName string, endpoint dw.Endpoint) string {
	if endpoint.Attributes.GetString(uniqueEndpointAttributeName, nil) == "true" {
		return getPublicURLPrefix(workspaceID, machineName, int32(endpoint.TargetPort), endpoint.Name)
	}
	return getPublicURLPrefix(workspaceID, machineName, int32(endpoint.TargetPort), "")
}
func getPublicURLPrefix(workspaceID string, machineName string, port int32, uniqueEndpointName string) string {
if uniqueEndpointName == "" {
return fmt.Sprintf(endpointURLPrefixPattern, workspaceID, machineName, port)
}
return fmt.Sprintf(uniqueEndpointURLPrefixPattern, workspaceID, machineName, uniqueEndpointName)
}
// determineEndpointScheme figures out the URL scheme of an endpoint, upgrading it to its
// secure variant when the endpoint declares itself secure or when it is served through
// the gateway (which is always on HTTPS).
func determineEndpointScheme(gatewayEnabled bool, e dw.Endpoint) string {
	scheme := string(e.Protocol)
	if scheme == "" {
		scheme = "http"
	}
	// the gateway is always on HTTPS, so endpoints routed through it must use the TLS'd variant
	servedViaGateway := gatewayEnabled && e.Attributes.GetString(urlRewriteSupportedEndpointAttributeName, nil) == "true"
	if e.Secure || servedViaGateway {
		return secureScheme(scheme)
	}
	return scheme
}

View File

@ -0,0 +1,622 @@
package solver
import (
"context"
"fmt"
"strings"
"testing"
dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
"github.com/devfile/api/v2/pkg/attributes"
dwo "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
"github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers"
"github.com/devfile/devworkspace-operator/pkg/constants"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
org "github.com/eclipse-che/che-operator/api"
v1 "github.com/eclipse-che/che-operator/api/v1"
"github.com/eclipse-che/che-operator/api/v2alpha1"
controller "github.com/eclipse-che/che-operator/controllers/devworkspace"
"github.com/eclipse-che/che-operator/controllers/devworkspace/defaults"
routev1 "github.com/openshift/api/route/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
rbac "k8s.io/api/rbac/v1"
apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/yaml"
)
// createTestScheme assembles a runtime scheme with all the API groups the solver tests
// touch registered in it.
func createTestScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	registrations := []func(*runtime.Scheme) error{
		extensions.AddToScheme,
		corev1.AddToScheme,
		appsv1.AddToScheme,
		rbac.AddToScheme,
		dw.AddToScheme,
		dwo.AddToScheme,
		routev1.AddToScheme,
		v1.AddToScheme,
	}
	for _, register := range registrations {
		utilruntime.Must(register(scheme))
	}
	return scheme
}
// getSpecObjectsForManager reconciles the provided che manager, asks the solver for the
// routing objects of the provided devworkspace routing, and returns the fake client, the
// solver and the produced objects. Any additional objects are seeded into the fake
// cluster before reconciliation starts.
func getSpecObjectsForManager(t *testing.T, mgr *v2alpha1.CheCluster, routing *dwo.DevWorkspaceRouting, additionalInitialObjects ...runtime.Object) (client.Client, solvers.RoutingSolver, solvers.RoutingObjects) {
	scheme := createTestScheme()
	allObjs := []runtime.Object{asV1(mgr)}
	allObjs = append(allObjs, additionalInitialObjects...)
	cl := fake.NewFakeClientWithScheme(scheme, allObjs...)
	solver, err := Getter(scheme).GetSolver(cl, "che")
	if err != nil {
		t.Fatal(err)
	}
	meta := solvers.DevWorkspaceMetadata{
		DevWorkspaceId: routing.Spec.DevWorkspaceId,
		Namespace:      routing.GetNamespace(),
		PodSelector:    routing.Spec.PodSelector,
	}
	// we need to do 1 round of che manager reconciliation so that the solver gets initialized
	cheRecon := controller.New(cl, scheme)
	_, err = cheRecon.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: mgr.Name, Namespace: mgr.Namespace}})
	if err != nil {
		t.Fatal(err)
	}
	objs, err := solver.GetSpecObjects(routing, meta)
	if err != nil {
		t.Fatal(err)
	}
	// now we need a second round of che manager reconciliation so that it proclaims the
	// che gateway as established. Use the manager's actual key (the original hardcoded
	// "che"/"ns") and fail the test on a reconcile error instead of ignoring it.
	_, err = cheRecon.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: mgr.Name, Namespace: mgr.Namespace}})
	if err != nil {
		t.Fatal(err)
	}
	return cl, solver, objs
}
// getSpecObjects is a convenience wrapper around getSpecObjectsForManager that uses a
// default test che manager called "che" in the "ns" namespace.
func getSpecObjects(t *testing.T, routing *dwo.DevWorkspaceRouting) (client.Client, solvers.RoutingSolver, solvers.RoutingObjects) {
	defaultManager := &v2alpha1.CheCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "che",
			Namespace:  "ns",
			Finalizers: []string{controller.FinalizerName},
		},
		Spec: v2alpha1.CheClusterSpec{
			Gateway: v2alpha1.CheGatewaySpec{
				Host: "over.the.rainbow",
			},
			WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{
				BaseDomain: "down.on.earth",
			},
		},
	}
	return getSpecObjectsForManager(t, defaultManager, routing)
}
// subdomainDevWorkspaceRouting builds a test routing for the "wsid" devworkspace with
// three public endpoints on the "m1" component. None of the endpoints declare support
// for URL rewriting, so they are expected to be exposed on dedicated subdomains rather
// than through the gateway.
func subdomainDevWorkspaceRouting() *dwo.DevWorkspaceRouting {
	return &dwo.DevWorkspaceRouting{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "routing",
			Namespace: "ws",
		},
		Spec: dwo.DevWorkspaceRoutingSpec{
			DevWorkspaceId: "wsid",
			RoutingClass:   "che",
			Endpoints: map[string]dwo.EndpointList{
				"m1": {
					{
						Name:       "e1",
						TargetPort: 9999,
						Exposure:   dw.PublicEndpointExposure,
						Protocol:   "https",
						Path:       "/1/",
					},
					{
						// secure endpoint declared on a plain-http protocol
						Name:       "e2",
						TargetPort: 9999,
						Exposure:   dw.PublicEndpointExposure,
						Protocol:   "http",
						Path:       "/2.js",
						Secure:     true,
					},
					{
						// endpoint with no explicit protocol or path
						Name:       "e3",
						TargetPort: 9999,
						Exposure:   dw.PublicEndpointExposure,
					},
				},
			},
		},
	}
}
// relocatableDevWorkspaceRouting builds a test routing for the "wsid" devworkspace with
// three public endpoints on the "m1" component. All of them declare support for URL
// rewriting, so they are expected to be exposed through the gateway on path prefixes.
func relocatableDevWorkspaceRouting() *dwo.DevWorkspaceRouting {
	return &dwo.DevWorkspaceRouting{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "routing",
			Namespace: "ws",
		},
		Spec: dwo.DevWorkspaceRoutingSpec{
			DevWorkspaceId: "wsid",
			RoutingClass:   "che",
			Endpoints: map[string]dwo.EndpointList{
				"m1": {
					{
						Name:       "e1",
						TargetPort: 9999,
						Exposure:   dw.PublicEndpointExposure,
						Protocol:   "https",
						Path:       "/1/",
						Attributes: attributes.Attributes{
							urlRewriteSupportedEndpointAttributeName: apiext.JSON{Raw: []byte("\"true\"")},
						},
					},
					{
						// secure endpoint declared on a plain-http protocol
						Name:       "e2",
						TargetPort: 9999,
						Exposure:   dw.PublicEndpointExposure,
						Protocol:   "http",
						Path:       "/2.js",
						Secure:     true,
						Attributes: attributes.Attributes{
							urlRewriteSupportedEndpointAttributeName: apiext.JSON{Raw: []byte("\"true\"")},
						},
					},
					{
						// endpoint with no explicit protocol or path
						Name:       "e3",
						TargetPort: 9999,
						Exposure:   dw.PublicEndpointExposure,
						Attributes: attributes.Attributes{
							urlRewriteSupportedEndpointAttributeName: apiext.JSON{Raw: []byte("\"true\"")},
						},
					},
				},
			},
		},
	}
}
// TestCreateRelocatedObjects checks that endpoints supporting URL rewriting are exposed
// through the gateway: no ingresses or routes are created and a workspace-specific
// Traefik configmap is produced instead.
func TestCreateRelocatedObjects(t *testing.T) {
	infrastructure.InitializeForTesting(infrastructure.Kubernetes)
	cl, _, objs := getSpecObjects(t, relocatableDevWorkspaceRouting())
	t.Run("noIngresses", func(t *testing.T) {
		if len(objs.Ingresses) != 0 {
			t.Error()
		}
	})
	t.Run("noRoutes", func(t *testing.T) {
		if len(objs.Routes) != 0 {
			t.Error()
		}
	})
	t.Run("noPodAdditions", func(t *testing.T) {
		if objs.PodAdditions != nil {
			t.Error()
		}
	})
	for i := range objs.Services {
		t.Run(fmt.Sprintf("service-%d", i), func(t *testing.T) {
			svc := &objs.Services[i]
			if svc.Annotations[defaults.ConfigAnnotationCheManagerName] != "che" {
				t.Errorf("The name of the associated che manager should have been recorded in the service annotation")
			}
			if svc.Annotations[defaults.ConfigAnnotationCheManagerNamespace] != "ns" {
				t.Errorf("The namespace of the associated che manager should have been recorded in the service annotation")
			}
			if svc.Labels[constants.DevWorkspaceIDLabel] != "wsid" {
				t.Errorf("The workspace ID should be recorded in the service labels")
			}
		})
	}
	t.Run("traefikConfig", func(t *testing.T) {
		cms := &corev1.ConfigMapList{}
		if err := cl.List(context.TODO(), cms); err != nil {
			t.Fatal(err)
		}
		if len(cms.Items) != 1 {
			t.Errorf("there should be 1 configmap created for the gateway config of the workspace but there were: %d", len(cms.Items))
		}
		// index into the list instead of taking the address of the range variable -
		// before Go 1.22 the range variable is reused across iterations, so "&cfg"
		// would end up pointing at the LAST visited item, not the matched one
		var workspaceCfg *corev1.ConfigMap
		for i := range cms.Items {
			if cms.Items[i].Name == "wsid" {
				workspaceCfg = &cms.Items[i]
			}
		}
		if workspaceCfg == nil {
			t.Fatalf("traefik configuration for the workspace not found")
		}
		traefikWorkspaceConfig := workspaceCfg.Data["wsid.yml"]
		if len(traefikWorkspaceConfig) == 0 {
			t.Fatal("No traefik config file found in the workspace config configmap")
		}
		workspaceConfig := traefikConfig{}
		if err := yaml.Unmarshal([]byte(traefikWorkspaceConfig), &workspaceConfig); err != nil {
			t.Fatal(err)
		}
		if len(workspaceConfig.HTTP.Routers) != 1 {
			t.Fatalf("Expected exactly one traefik router but got %d", len(workspaceConfig.HTTP.Routers))
		}
		if _, ok := workspaceConfig.HTTP.Routers["wsid-m1-9999"]; !ok {
			t.Fatal("traefik config doesn't contain expected workspace configuration")
		}
	})
}
// TestCreateSubDomainObjects checks that endpoints not supporting URL rewriting are
// exposed on dedicated subdomains - via ingresses on Kubernetes and routes on OpenShift.
func TestCreateSubDomainObjects(t *testing.T) {
	testCommon := func(infra infrastructure.Type) solvers.RoutingObjects {
		infrastructure.InitializeForTesting(infra)
		cl, _, objs := getSpecObjects(t, subdomainDevWorkspaceRouting())
		t.Run("noPodAdditions", func(t *testing.T) {
			if objs.PodAdditions != nil {
				t.Error()
			}
		})
		for i := range objs.Services {
			t.Run(fmt.Sprintf("service-%d", i), func(t *testing.T) {
				svc := &objs.Services[i]
				if svc.Annotations[defaults.ConfigAnnotationCheManagerName] != "che" {
					t.Errorf("The name of the associated che manager should have been recorded in the service annotation")
				}
				if svc.Annotations[defaults.ConfigAnnotationCheManagerNamespace] != "ns" {
					t.Errorf("The namespace of the associated che manager should have been recorded in the service annotation")
				}
				if svc.Labels[constants.DevWorkspaceIDLabel] != "wsid" {
					t.Errorf("The workspace ID should be recorded in the service labels")
				}
			})
		}
		t.Run("noWorkspaceTraefikConfig", func(t *testing.T) {
			cms := &corev1.ConfigMapList{}
			// check the List error (the original ignored it) so that a listing failure
			// doesn't masquerade as a passing "0 configmaps" assertion
			if err := cl.List(context.TODO(), cms); err != nil {
				t.Fatal(err)
			}
			if len(cms.Items) != 0 {
				t.Errorf("there should be 0 configmaps created but there were: %d", len(cms.Items))
			}
		})
		return objs
	}
	t.Run("expectedIngresses", func(t *testing.T) {
		objs := testCommon(infrastructure.Kubernetes)
		if len(objs.Ingresses) != 1 {
			t.Error()
		}
		if objs.Ingresses[0].Spec.Rules[0].Host != "wsid-1.down.on.earth" {
			t.Error()
		}
	})
	t.Run("expectedRoutes", func(t *testing.T) {
		objs := testCommon(infrastructure.OpenShiftv4)
		if len(objs.Routes) != 1 {
			t.Error()
		}
		if objs.Routes[0].Spec.Host != "wsid-1.down.on.earth" {
			t.Error()
		}
	})
}
// TestReportRelocatableExposedEndpoints checks that endpoints exposed through the gateway
// are reported with path-based URLs on the gateway host.
func TestReportRelocatableExposedEndpoints(t *testing.T) {
	infrastructure.InitializeForTesting(infrastructure.Kubernetes)
	routing := relocatableDevWorkspaceRouting()
	_, solver, objs := getSpecObjects(t, routing)
	exposed, ready, err := solver.GetExposedEndpoints(routing.Spec.Endpoints, objs)
	if err != nil {
		t.Fatal(err)
	}
	if !ready {
		t.Errorf("The exposed endpoints should have been ready.")
	}
	if len(exposed) != 1 {
		// fixed the "endpoins" typo of the original message
		t.Errorf("There should have been 1 exposed endpoints but found %d", len(exposed))
	}
	m1, ok := exposed["m1"]
	if !ok {
		t.Errorf("The exposed endpoints should have been defined on the m1 component.")
	}
	if len(m1) != 3 {
		t.Fatalf("There should have been 3 endpoints for m1.")
	}
	e1 := m1[0]
	if e1.Name != "e1" {
		t.Errorf("The first endpoint should have been e1 but is %s", e1.Name)
	}
	if e1.Url != "https://over.the.rainbow/wsid/m1/9999/1/" {
		t.Errorf("The e1 endpoint should have the following URL: '%s' but has '%s'.", "https://over.the.rainbow/wsid/m1/9999/1/", e1.Url)
	}
	e2 := m1[1]
	if e2.Name != "e2" {
		// report the actual offending name (the original printed e1.Name here)
		t.Errorf("The second endpoint should have been e2 but is %s", e2.Name)
	}
	if e2.Url != "https://over.the.rainbow/wsid/m1/9999/2.js" {
		t.Errorf("The e2 endpoint should have the following URL: '%s' but has '%s'.", "https://over.the.rainbow/wsid/m1/9999/2.js", e2.Url)
	}
	e3 := m1[2]
	if e3.Name != "e3" {
		// report the actual offending name (the original printed e1.Name here)
		t.Errorf("The third endpoint should have been e3 but is %s", e3.Name)
	}
	if e3.Url != "https://over.the.rainbow/wsid/m1/9999/" {
		t.Errorf("The e3 endpoint should have the following URL: '%s' but has '%s'.", "https://over.the.rainbow/wsid/m1/9999/", e3.Url)
	}
}
// TestReportSubdomainExposedEndpoints checks that endpoints not routed through the
// gateway are reported with per-endpoint subdomain URLs.
func TestReportSubdomainExposedEndpoints(t *testing.T) {
	infrastructure.InitializeForTesting(infrastructure.Kubernetes)
	routing := subdomainDevWorkspaceRouting()
	_, solver, objs := getSpecObjects(t, routing)
	exposed, ready, err := solver.GetExposedEndpoints(routing.Spec.Endpoints, objs)
	if err != nil {
		t.Fatal(err)
	}
	if !ready {
		t.Errorf("The exposed endpoints should have been ready.")
	}
	if len(exposed) != 1 {
		// fixed the "endpoins" typo of the original message
		t.Errorf("There should have been 1 exposed endpoints but found %d", len(exposed))
	}
	m1, ok := exposed["m1"]
	if !ok {
		t.Errorf("The exposed endpoints should have been defined on the m1 component.")
	}
	if len(m1) != 3 {
		t.Fatalf("There should have been 3 endpoints for m1.")
	}
	e1 := m1[0]
	if e1.Name != "e1" {
		t.Errorf("The first endpoint should have been e1 but is %s", e1.Name)
	}
	if e1.Url != "https://wsid-1.down.on.earth/1/" {
		t.Errorf("The e1 endpoint should have the following URL: '%s' but has '%s'.", "https://wsid-1.down.on.earth/1/", e1.Url)
	}
	e2 := m1[1]
	if e2.Name != "e2" {
		// report the actual offending name (the original printed e1.Name here)
		t.Errorf("The second endpoint should have been e2 but is %s", e2.Name)
	}
	if e2.Url != "https://wsid-1.down.on.earth/2.js" {
		t.Errorf("The e2 endpoint should have the following URL: '%s' but has '%s'.", "https://wsid-1.down.on.earth/2.js", e2.Url)
	}
	e3 := m1[2]
	if e3.Name != "e3" {
		// report the actual offending name (the original printed e1.Name here)
		t.Errorf("The third endpoint should have been e3 but is %s", e3.Name)
	}
	if e3.Url != "http://wsid-1.down.on.earth/" {
		// the expected value in the message now matches the asserted value (the
		// original message claimed an https URL while asserting an http one)
		t.Errorf("The e3 endpoint should have the following URL: '%s' but has '%s'.", "http://wsid-1.down.on.earth/", e3.Url)
	}
}
// TestFinalize checks that finalizing a routing deletes all the traefik configmaps that
// were created for the workspace.
func TestFinalize(t *testing.T) {
	infrastructure.InitializeForTesting(infrastructure.Kubernetes)
	routing := relocatableDevWorkspaceRouting()
	cl, slv, _ := getSpecObjects(t, routing)
	// the create test checks that during the above call, the solver created the 2 traefik configmaps
	// (1 for the main config and the second for the devworkspace)
	// now, let the solver finalize the routing
	if err := slv.Finalize(routing); err != nil {
		t.Fatal(err)
	}
	cms := &corev1.ConfigMapList{}
	// check the List error (the original ignored it) so that a listing failure doesn't
	// masquerade as a passing "0 configmaps" assertion
	if err := cl.List(context.TODO(), cms); err != nil {
		t.Fatal(err)
	}
	if len(cms.Items) != 0 {
		t.Fatalf("There should be just 0 configmaps after routing finalization, but there were %d found", len(cms.Items))
	}
}
// TestEndpointsAlwaysOnSecureProtocolsWhenExposedThroughGateway checks that every
// endpoint routed through the (TLS-terminating) gateway is reported with an https URL.
func TestEndpointsAlwaysOnSecureProtocolsWhenExposedThroughGateway(t *testing.T) {
	infrastructure.InitializeForTesting(infrastructure.Kubernetes)
	routing := relocatableDevWorkspaceRouting()
	_, slv, objs := getSpecObjects(t, routing)
	exposed, ready, err := slv.GetExposedEndpoints(routing.Spec.Endpoints, objs)
	if err != nil {
		t.Fatal(err)
	}
	if !ready {
		t.Errorf("The exposed endpoints should be considered ready.")
	}
	for _, componentEndpoints := range exposed {
		for _, ep := range componentEndpoints {
			if strings.HasPrefix(ep.Url, "https://") {
				continue
			}
			t.Errorf("The endpoint %s should be exposed on https.", ep.Url)
		}
	}
}
// TestUsesIngressAnnotationsForWorkspaceEndpointIngresses verifies that custom ingress
// annotations configured on the che manager are propagated to the endpoint ingresses.
func TestUsesIngressAnnotationsForWorkspaceEndpointIngresses(t *testing.T) {
	infrastructure.InitializeForTesting(infrastructure.Kubernetes)
	manager := &v2alpha1.CheCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "che",
			Namespace:  "ns",
			Finalizers: []string{controller.FinalizerName},
		},
		Spec: v2alpha1.CheClusterSpec{
			Gateway: v2alpha1.CheGatewaySpec{
				Host: "over.the.rainbow",
			},
			WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{
				BaseDomain: "down.on.earth",
			},
			K8s: v2alpha1.CheClusterSpecK8s{
				IngressAnnotations: map[string]string{
					"a": "b",
				},
			},
		},
	}
	_, _, objs := getSpecObjectsForManager(t, manager, subdomainDevWorkspaceRouting())
	if len(objs.Ingresses) != 1 {
		t.Fatalf("Unexpected number of generated ingresses: %d", len(objs.Ingresses))
	}
	annotations := objs.Ingresses[0].Annotations
	// 3 annotations - a => b, endpoint-name and component-name
	if len(annotations) != 3 {
		t.Fatalf("Unexpected number of annotations on the generated ingress: %d", len(annotations))
	}
	if annotations["a"] != "b" {
		t.Errorf("Unexpected value of the custom endpoint ingress annotation")
	}
}
// TestUsesCustomCertificateForWorkspaceEndpointIngresses verifies that, when a TLS secret
// is configured for the workspace endpoints, the generated ingress carries a TLS record
// referencing the per-workspace copy of that secret.
func TestUsesCustomCertificateForWorkspaceEndpointIngresses(t *testing.T) {
	infrastructure.InitializeForTesting(infrastructure.Kubernetes)
	manager := &v2alpha1.CheCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "che",
			Namespace:  "ns",
			Finalizers: []string{controller.FinalizerName},
		},
		Spec: v2alpha1.CheClusterSpec{
			Gateway: v2alpha1.CheGatewaySpec{
				Host: "beyond.comprehension",
			},
			WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{
				BaseDomain:    "almost.trivial",
				TlsSecretName: "tlsSecret",
			},
		},
	}
	tlsSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "tlsSecret",
			Namespace: "ns",
		},
		Data: map[string][]byte{
			"tls.key": []byte("asdf"),
			"tls.crt": []byte("qwer"),
		},
	}
	_, _, objs := getSpecObjectsForManager(t, manager, subdomainDevWorkspaceRouting(), tlsSecret)
	if len(objs.Ingresses) != 1 {
		t.Fatalf("Unexpected number of generated ingresses: %d", len(objs.Ingresses))
	}
	tls := objs.Ingresses[0].Spec.TLS
	if len(tls) != 1 {
		t.Fatalf("Unexpected number of TLS records on the ingress: %d", len(tls))
	}
	if tls[0].SecretName != "wsid-endpoints" {
		t.Errorf("Unexpected name of the TLS secret on the ingress: %s", tls[0].SecretName)
	}
	if len(tls[0].Hosts) != 1 {
		t.Fatalf("Unexpected number of host records on the TLS spec: %d", len(tls[0].Hosts))
	}
	if tls[0].Hosts[0] != "wsid-1.almost.trivial" {
		t.Errorf("Unexpected host name of the TLS spec: %s", tls[0].Hosts[0])
	}
}
// TestUsesCustomCertificateForWorkspaceEndpointRoutes verifies that, when a TLS secret is
// configured for the workspace endpoints, the generated OpenShift route embeds the
// secret's key and certificate.
func TestUsesCustomCertificateForWorkspaceEndpointRoutes(t *testing.T) {
	infrastructure.InitializeForTesting(infrastructure.OpenShiftv4)
	mgr := &v2alpha1.CheCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "che",
			Namespace:  "ns",
			Finalizers: []string{controller.FinalizerName},
		},
		Spec: v2alpha1.CheClusterSpec{
			Gateway: v2alpha1.CheGatewaySpec{
				Host: "beyond.comprehension",
			},
			WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{
				BaseDomain:    "almost.trivial",
				TlsSecretName: "tlsSecret",
			},
		},
	}
	_, _, objs := getSpecObjectsForManager(t, mgr, subdomainDevWorkspaceRouting(), &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "tlsSecret",
			Namespace: "ns",
		},
		Data: map[string][]byte{
			"tls.key": []byte("asdf"),
			"tls.crt": []byte("qwer"),
		},
	})
	if len(objs.Routes) != 1 {
		t.Fatalf("Unexpected number of generated routes: %d", len(objs.Routes))
	}
	route := objs.Routes[0]
	// fail cleanly instead of panicking when the TLS config was not generated at all
	if route.Spec.TLS == nil {
		t.Fatal("Expected a TLS config on the generated route but there was none")
	}
	if route.Spec.TLS.Certificate != "qwer" {
		t.Errorf("Unexpected name of the TLS certificate on the route: %s", route.Spec.TLS.Certificate)
	}
	if route.Spec.TLS.Key != "asdf" {
		t.Errorf("Unexpected key of TLS spec: %s", route.Spec.TLS.Key)
	}
}
// asV1 converts the v2alpha1 CheCluster into its v1 representation so it can be seeded
// into the fake cluster (only the v1 API of the CheCluster is registered in the test scheme).
func asV1(v2Obj *v2alpha1.CheCluster) *v1.CheCluster {
	return org.AsV1(v2Obj)
}

View File

@ -0,0 +1,5 @@
// Package solver contains the implementation of the "devworkspace routing solver" which provides che-specific
// logic to the otherwise generic dev workspace routing controller.
// The devworkspace routing controller needs to be provided with a "solver getter" in its configuration prior
// to starting the reconciliation loop. See `CheRouterGetter`.
package solver

View File

@ -0,0 +1,239 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package solver
import (
"context"
"fmt"
dwo "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
"github.com/devfile/devworkspace-operator/pkg/constants"
"github.com/eclipse-che/che-operator/api/v2alpha1"
"github.com/eclipse-che/che-operator/controllers/devworkspace/defaults"
routev1 "github.com/openshift/api/route/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// IngressExposer holds the configuration needed to generate the ingresses exposing the
// workspace endpoints on Kubernetes.
type IngressExposer struct {
	// devWorkspaceID is the ID of the devworkspace the exposed endpoints belong to.
	devWorkspaceID string
	// baseDomain is the domain suffix of the generated per-endpoint host names.
	baseDomain string
	// ingressAnnotations are additional annotations to put on every generated ingress.
	ingressAnnotations map[string]string
	// tlsSecretName is the name of the per-workspace TLS secret copy; empty when no
	// TLS secret is configured for the workspace endpoints.
	tlsSecretName string
}
// RouteExposer holds the configuration needed to generate the routes exposing the
// workspace endpoints on OpenShift.
type RouteExposer struct {
	// devWorkspaceID is the ID of the devworkspace the exposed endpoints belong to.
	devWorkspaceID string
	// baseDomain is the domain suffix of the generated per-endpoint host names.
	baseDomain string
	// tlsSecretKey is the contents of the configured TLS secret's "tls.key" entry;
	// empty when no TLS secret is configured.
	tlsSecretKey string
	// tlsSecretCertificate is the contents of the configured TLS secret's "tls.crt"
	// entry; empty when no TLS secret is configured.
	tlsSecretCertificate string
}
// EndpointInfo describes a single endpoint to be exposed by a route or an ingress.
type EndpointInfo struct {
	// order is the 0-based sequence number of the endpoint, used to compose a unique host name.
	order int
	// componentName is the name of the devworkspace component (machine) declaring the endpoint.
	componentName string
	// endpointName is the endpoint's name; may be empty for non-unique endpoints.
	endpointName string
	// port is the target port of the endpoint.
	port int32
	// scheme is the URL scheme the endpoint is served on; secure schemes trigger TLS configuration.
	scheme string
	// service is the cluster service that exposes the endpoint.
	service *corev1.Service
}
// getEndpointExposingObjectName composes the names of the objects (both Kubernetes
// objects and "objects" within the Traefik configuration) that expose endpoints. The
// endpoint name is only appended for endpoints that have one (i.e. unique endpoints).
func getEndpointExposingObjectName(componentName string, workspaceID string, port int32, endpointName string) string {
	if endpointName != "" {
		return fmt.Sprintf("%s-%s-%d-%s", workspaceID, componentName, port, endpointName)
	}
	return fmt.Sprintf("%s-%s-%d", workspaceID, componentName, port)
}
// initFrom initializes the exposer from the cluster configuration and the routing being
// processed. When a TLS secret is configured for the workspace endpoints, its key and
// certificate are loaded so they can be embedded into the generated routes.
func (e *RouteExposer) initFrom(ctx context.Context, cl client.Client, cluster *v2alpha1.CheCluster, routing *dwo.DevWorkspaceRouting) error {
	e.baseDomain = cluster.Status.WorkspaceBaseDomain
	e.devWorkspaceID = routing.Spec.DevWorkspaceId
	if cluster.Spec.WorkspaceDomainEndpoints.TlsSecretName != "" {
		secret := &corev1.Secret{}
		// read the secret through the same field that was checked above (the original
		// checked WorkspaceDomainEndpoints.TlsSecretName but read Spec.TlsSecretName)
		err := cl.Get(ctx, client.ObjectKey{Name: cluster.Spec.WorkspaceDomainEndpoints.TlsSecretName, Namespace: cluster.Namespace}, secret)
		if err != nil {
			return err
		}
		e.tlsSecretKey = string(secret.Data["tls.key"])
		e.tlsSecretCertificate = string(secret.Data["tls.crt"])
	}
	return nil
}
// initFrom initializes the exposer from the cluster configuration and the routing being
// processed. When a TLS secret is configured for the workspace endpoints, it is copied
// into the routing's namespace under the "<workspace-id>-endpoints" name (owned by the
// routing so that it is cleaned up with it) and the generated ingresses reference that copy.
func (e *IngressExposer) initFrom(ctx context.Context, cl client.Client, cluster *v2alpha1.CheCluster, routing *dwo.DevWorkspaceRouting, ingressAnnotations map[string]string) error {
	e.baseDomain = cluster.Status.WorkspaceBaseDomain
	e.devWorkspaceID = routing.Spec.DevWorkspaceId
	e.ingressAnnotations = ingressAnnotations
	if cluster.Spec.WorkspaceDomainEndpoints.TlsSecretName != "" {
		tlsSecretName := routing.Spec.DevWorkspaceId + "-endpoints"
		e.tlsSecretName = tlsSecretName
		// check that there is no secret with the anticipated name yet
		existing := &corev1.Secret{}
		err := cl.Get(ctx, client.ObjectKey{Name: tlsSecretName, Namespace: routing.Namespace}, existing)
		if err == nil {
			// the per-workspace copy already exists - nothing to do
			return nil
		}
		if !errors.IsNotFound(err) {
			// the original silently swallowed non-NotFound errors; surface them instead
			return err
		}
		source := &corev1.Secret{}
		// read the secret through the same field that was checked above (the original
		// checked WorkspaceDomainEndpoints.TlsSecretName but read Spec.TlsSecretName)
		if err = cl.Get(ctx, client.ObjectKey{Name: cluster.Spec.WorkspaceDomainEndpoints.TlsSecretName, Namespace: cluster.Namespace}, source); err != nil {
			return err
		}
		yes := true
		newSecret := &corev1.Secret{
			ObjectMeta: v1.ObjectMeta{
				Name:      tlsSecretName,
				Namespace: routing.Namespace,
				OwnerReferences: []v1.OwnerReference{
					{
						Name:               routing.Name,
						Kind:               routing.Kind,
						APIVersion:         routing.APIVersion,
						UID:                routing.UID,
						Controller:         &yes,
						BlockOwnerDeletion: &yes,
					},
				},
			},
			Type: source.Type,
			Data: source.Data,
		}
		return cl.Create(ctx, newSecret)
	}
	return nil
}
// getRouteForService builds the OpenShift route exposing the provided endpoint on a
// subdomain of the configured base domain. Secure endpoints get edge TLS termination,
// using the custom certificate when one was configured.
func (e *RouteExposer) getRouteForService(endpoint *EndpointInfo) routev1.Route {
	route := routev1.Route{
		ObjectMeta: metav1.ObjectMeta{
			Name:      getEndpointExposingObjectName(endpoint.componentName, e.devWorkspaceID, endpoint.port, endpoint.endpointName),
			Namespace: endpoint.service.Namespace,
			Labels: map[string]string{
				constants.DevWorkspaceIDLabel: e.devWorkspaceID,
			},
			Annotations:     routeAnnotations(endpoint.componentName, endpoint.endpointName),
			OwnerReferences: endpoint.service.OwnerReferences,
		},
		Spec: routev1.RouteSpec{
			Host: hostName(endpoint.order, e.devWorkspaceID, e.baseDomain),
			To: routev1.RouteTargetReference{
				Kind: "Service",
				Name: endpoint.service.Name,
			},
			Port: &routev1.RoutePort{
				TargetPort: intstr.FromInt(int(endpoint.port)),
			},
		},
	}
	if isSecureScheme(endpoint.scheme) {
		tls := &routev1.TLSConfig{
			InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect,
			Termination:                   routev1.TLSTerminationEdge,
		}
		if e.tlsSecretKey != "" {
			tls.Key = e.tlsSecretKey
			tls.Certificate = e.tlsSecretCertificate
		}
		route.Spec.TLS = tls
	}
	return route
}
// getIngressForService builds the ingress exposing the provided endpoint on a subdomain
// of the configured base domain. When the endpoint is secure and a per-workspace TLS
// secret has been prepared (see initFrom), a TLS record referencing it is added.
func (e *IngressExposer) getIngressForService(endpoint *EndpointInfo) v1beta1.Ingress {
	targetEndpoint := intstr.FromInt(int(endpoint.port))
	hostname := hostName(endpoint.order, e.devWorkspaceID, e.baseDomain)
	ingressPathType := v1beta1.PathTypeImplementationSpecific
	ingress := v1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      getEndpointExposingObjectName(endpoint.componentName, e.devWorkspaceID, endpoint.port, endpoint.endpointName),
			Namespace: endpoint.service.Namespace,
			// the devworkspace ID label makes it possible to look up all objects of a workspace
			Labels: map[string]string{
				constants.DevWorkspaceIDLabel: e.devWorkspaceID,
			},
			Annotations:     finalizeIngressAnnotations(e.ingressAnnotations, endpoint.componentName, endpoint.endpointName),
			OwnerReferences: endpoint.service.OwnerReferences,
		},
		Spec: v1beta1.IngressSpec{
			Rules: []v1beta1.IngressRule{
				{
					Host: hostname,
					IngressRuleValue: v1beta1.IngressRuleValue{
						HTTP: &v1beta1.HTTPIngressRuleValue{
							Paths: []v1beta1.HTTPIngressPath{
								{
									Backend: v1beta1.IngressBackend{
										ServiceName: endpoint.service.Name,
										ServicePort: targetEndpoint,
									},
									PathType: &ingressPathType,
									// the whole (sub)domain belongs to this single endpoint
									Path: "/",
								},
							},
						},
					},
				},
			},
		},
	}
	if isSecureScheme(endpoint.scheme) && e.tlsSecretName != "" {
		ingress.Spec.TLS = []v1beta1.IngressTLS{
			{
				Hosts:      []string{hostname},
				SecretName: e.tlsSecretName,
			},
		}
	}
	return ingress
}
// hostName builds the host name of the subdomain exposing an endpoint. The order is
// 0-based while the host names are numbered from 1, e.g. "wsid-1.base.domain".
func hostName(order int, workspaceID string, baseDomain string) string {
	subdomain := fmt.Sprintf("%s-%d", workspaceID, order+1)
	return subdomain + "." + baseDomain
}
// routeAnnotations composes the annotations that mark an exposing object as belonging to
// a concrete endpoint of a concrete component (machine).
func routeAnnotations(machineName string, endpointName string) map[string]string {
	annos := make(map[string]string, 2)
	annos[defaults.ConfigAnnotationEndpointName] = endpointName
	annos[defaults.ConfigAnnotationComponentName] = machineName
	return annos
}
// finalizeIngressAnnotations returns a copy of the configured ingress annotations with
// the endpoint-identifying annotations added on top.
func finalizeIngressAnnotations(ingressAnnotations map[string]string, machineName string, endpointName string) map[string]string {
	annos := make(map[string]string, len(ingressAnnotations)+2)
	for key, value := range ingressAnnotations {
		annos[key] = value
	}
	annos[defaults.ConfigAnnotationEndpointName] = endpointName
	annos[defaults.ConfigAnnotationComponentName] = machineName
	return annos
}

View File

@ -0,0 +1,199 @@
//
// Copyright (c) 2019-2020 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package solver
import (
"fmt"
"time"
"github.com/devfile/devworkspace-operator/pkg/constants"
controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
"github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers"
"github.com/eclipse-che/che-operator/api/v2alpha1"
controller "github.com/eclipse-che/che-operator/controllers/devworkspace"
"github.com/eclipse-che/che-operator/controllers/devworkspace/defaults"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
var (
	// logger is the package-level logger used by the solver.
	logger = ctrl.Log.WithName("solver")
)
// CheRoutingSolver is a struct representing the routing solver for Che specific routing of devworkspaces
type CheRoutingSolver struct {
	// client is used to read and modify the cluster objects backing the routing.
	client client.Client
	// scheme is the runtime scheme of the API types the solver works with.
	scheme *runtime.Scheme
}

// Magic to ensure we get compile time error right here if our struct doesn't support the interface.
var _ solvers.RoutingSolverGetter = (*CheRouterGetter)(nil)
var _ solvers.RoutingSolver = (*CheRoutingSolver)(nil)
// CheRouterGetter negotiates the solver with the calling code
type CheRouterGetter struct {
	// scheme is the runtime scheme handed over to the solvers this getter creates.
	scheme *runtime.Scheme
}
// Getter creates a new CheRouterGetter using the provided runtime scheme.
func Getter(scheme *runtime.Scheme) *CheRouterGetter {
	getter := &CheRouterGetter{}
	getter.scheme = scheme
	return getter
}
// HasSolver returns true when this getter can provide a solver for the given routing class.
func (g *CheRouterGetter) HasSolver(routingClass controllerv1alpha1.DevWorkspaceRoutingClass) bool {
	return isSupported(routingClass)
}
// GetSolver returns the Che routing solver for a supported routing class, or
// solvers.RoutingNotSupported for any other class.
func (g *CheRouterGetter) GetSolver(client client.Client, routingClass controllerv1alpha1.DevWorkspaceRoutingClass) (solver solvers.RoutingSolver, err error) {
	if isSupported(routingClass) {
		return &CheRoutingSolver{client: client, scheme: g.scheme}, nil
	}
	return nil, solvers.RoutingNotSupported
}
// SetupControllerManager registers the additional watches this solver needs with the
// controller builder of the devworkspace routing controller.
func (g *CheRouterGetter) SetupControllerManager(mgr *builder.Builder) error {
	// We want to watch configmaps and re-map the reconcile on the devworkspace routing, if possible
	// This way we can react on changes of the gateway configmap changes by re-reconciling the corresponding
	// devworkspace routing and thus keeping the devworkspace routing in a functional state
	// TODO is this going to be performant enough in a big cluster with very many configmaps?
	mgr.Watches(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(func(mo handler.MapObject) []reconcile.Request {
		applicable, key := isGatewayWorkspaceConfig(mo.Meta)
		if applicable {
			// cool, we can trigger the reconcile of the routing so that we can update the configmap that has just changed under our hands
			return []reconcile.Request{
				{
					NamespacedName: key,
				},
			}
		} else {
			// not a gateway workspace configmap - nothing to reconcile
			return []reconcile.Request{}
		}
	})})
	return nil
}
// isGatewayWorkspaceConfig decides whether the provided object is a gateway configmap of
// some devworkspace and, if so, also returns the key of the devworkspace routing the
// configmap belongs to (read from the annotations put on it at creation time).
func isGatewayWorkspaceConfig(obj metav1.Object) (bool, types.NamespacedName) {
	workspaceID := obj.GetLabels()[constants.DevWorkspaceIDLabel]
	// bail out quickly if we're not dealing with a configmap with an expected name
	if obj.GetName() != defaults.GetGatewayWorkpaceConfigMapName(workspaceID) {
		return false, types.NamespacedName{}
	}
	annos := obj.GetAnnotations()
	routingName := annos[defaults.ConfigAnnotationDevWorkspaceRoutingName]
	// if there is no annotation for the routing, we're out of luck.. this should not happen though
	if routingName == "" {
		return false, types.NamespacedName{}
	}
	// cool, we found a configmap belonging to a concrete devworkspace routing
	return true, types.NamespacedName{
		Name:      routingName,
		Namespace: annos[defaults.ConfigAnnotationDevWorkspaceRoutingNamespace],
	}
}
// FinalizerRequired tells the devworkspace routing controller that this solver always
// needs to finalize routings (see Finalize) before they are deleted.
func (c *CheRoutingSolver) FinalizerRequired(routing *controllerv1alpha1.DevWorkspaceRouting) bool {
	return true
}
// Finalize cleans up the cluster objects this solver created for the provided routing.
func (c *CheRoutingSolver) Finalize(routing *controllerv1alpha1.DevWorkspaceRouting) error {
	cheManager, err := cheManagerOfRouting(routing)
	if err == nil {
		err = c.cheRoutingFinalize(cheManager, routing)
	}
	return err
}
// GetSpecObjects constructs the cluster routing objects that should be applied on the
// cluster for the provided devworkspace routing.
func (c *CheRoutingSolver) GetSpecObjects(routing *controllerv1alpha1.DevWorkspaceRouting, workspaceMeta solvers.DevWorkspaceMetadata) (solvers.RoutingObjects, error) {
	mgr, err := cheManagerOfRouting(routing)
	if err != nil {
		return solvers.RoutingObjects{}, err
	}
	return c.cheSpecObjects(mgr, routing, workspaceMeta)
}
// GetExposedEndpoints retrieves the URL for each endpoint in a devfile spec from a set of
// RoutingObjects. The result maps component ids (as defined in the devfile) to the list
// of endpoints of that component. The returned "ready" flag specifies whether all the
// endpoints are resolved on the cluster; when false a retry is necessary because the
// URLs will be undefined.
func (c *CheRoutingSolver) GetExposedEndpoints(endpoints map[string]controllerv1alpha1.EndpointList, routingObj solvers.RoutingObjects) (exposedEndpoints map[string]controllerv1alpha1.ExposedEndpointList, ready bool, err error) {
	if len(routingObj.Services) == 0 {
		return map[string]controllerv1alpha1.ExposedEndpointList{}, true, nil
	}
	// the che manager and the workspace are identified by the annotations/labels put on
	// the services at creation time
	firstService := &routingObj.Services[0]
	managerKey := client.ObjectKey{
		Name:      firstService.Annotations[defaults.ConfigAnnotationCheManagerName],
		Namespace: firstService.Annotations[defaults.ConfigAnnotationCheManagerNamespace],
	}
	manager, err := findCheManager(managerKey)
	if err != nil {
		return nil, false, err
	}
	workspaceID := firstService.Labels[constants.DevWorkspaceIDLabel]
	return c.cheExposedEndpoints(manager, workspaceID, endpoints, routingObj)
}
// isSupported reports whether this solver handles the given routing class.
// Only the "che" routing class is supported.
func isSupported(routingClass controllerv1alpha1.DevWorkspaceRoutingClass) bool {
	switch routingClass {
	case "che":
		return true
	default:
		return false
	}
}
// cheManagerOfRouting looks up the Che manager identified by the annotations on the
// routing object.
func cheManagerOfRouting(routing *controllerv1alpha1.DevWorkspaceRouting) (*v2alpha1.CheCluster, error) {
	annos := routing.Annotations
	key := client.ObjectKey{
		Name:      annos[defaults.ConfigAnnotationCheManagerName],
		Namespace: annos[defaults.ConfigAnnotationCheManagerNamespace],
	}

	return findCheManager(key)
}
// findCheManager returns the currently known CheCluster for the given key. When the key
// has no name, the single manager in the cluster is used (more than one is an error in
// that case). A RoutingNotReady error asks the caller to retry when the managers have
// not been reconciled yet or the requested one does not (yet) exist.
func findCheManager(cheManagerKey client.ObjectKey) (*v2alpha1.CheCluster, error) {
	managers := controller.GetCurrentCheClusterInstances()

	if len(managers) == 0 {
		// the CheManager has not been reconciled yet, so let's wait a bit
		return &v2alpha1.CheCluster{}, &solvers.RoutingNotReady{Retry: 1 * time.Second}
	}

	if cheManagerKey.Name == "" {
		if len(managers) > 1 {
			return &v2alpha1.CheCluster{}, &solvers.RoutingInvalid{Reason: fmt.Sprintf("the routing does not specify any Che manager in its configuration but there are %d Che managers in the cluster", len(managers))}
		}

		// exactly one manager exists at this point - return it
		for _, manager := range managers {
			return &manager, nil
		}
	}

	if manager, ok := managers[cheManagerKey]; ok {
		return &manager, nil
	}

	logger.Info("Routing requires a non-existing che manager. Retrying in 10 seconds.", "key", cheManagerKey)
	return &v2alpha1.CheCluster{}, &solvers.RoutingNotReady{Retry: 10 * time.Second}
}

View File

@ -0,0 +1,39 @@
package solver
// A representation of the Traefik config as we need it. This is in no way complete but can be used for the purposes we need it for.
type traefikConfig struct {
	HTTP traefikConfigHTTP `json:"http"`
}
// traefikConfigHTTP holds the HTTP routing configuration: routers, services and
// middlewares, each keyed by name.
type traefikConfigHTTP struct {
	Routers map[string]traefikConfigRouter `json:"routers"`
	Services map[string]traefikConfigService `json:"services"`
	Middlewares map[string]traefikConfigMiddleware `json:"middlewares"`
}
// traefikConfigRouter matches requests (by Rule, with Priority) and forwards them to the
// named Service after passing them through the listed Middlewares.
type traefikConfigRouter struct {
	Rule string `json:"rule"`
	Service string `json:"service"`
	Middlewares []string `json:"middlewares"`
	Priority int `json:"priority"`
}
// traefikConfigService describes where a router's traffic is load-balanced to.
type traefikConfigService struct {
	LoadBalancer traefikConfigLoadbalancer `json:"loadBalancer"`
}
// traefikConfigMiddleware is the middleware configuration; only stripPrefix is modeled.
type traefikConfigMiddleware struct {
	StripPrefix traefikConfigStripPrefix `json:"stripPrefix"`
}
// traefikConfigLoadbalancer lists the backing servers of a service.
type traefikConfigLoadbalancer struct {
	Servers []traefikConfigLoadbalancerServer `json:"servers"`
}
// traefikConfigLoadbalancerServer is a single backend identified by its URL.
type traefikConfigLoadbalancerServer struct {
	URL string `json:"url"`
}
// traefikConfigStripPrefix configures the path prefixes to strip before forwarding.
type traefikConfigStripPrefix struct {
	Prefixes []string `json:"prefixes"`
}

View File

@ -0,0 +1,208 @@
//
// Copyright (c) 2019-2020 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package sync
import (
"context"
"fmt"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
var (
log = ctrl.Log.WithName("sync")
)
// Syncer synchronizes K8s objects with the cluster.
type Syncer struct {
	// client is used to read, create, update and delete the objects in the cluster
	client client.Client
	// scheme is used when setting the controller (owner) references on synced objects
	scheme *runtime.Scheme
}
// New creates a Syncer that uses the given client for cluster access and the given
// scheme for owner-reference handling.
func New(client client.Client, scheme *runtime.Scheme) Syncer {
	return Syncer{
		client: client,
		scheme: scheme,
	}
}
// Sync syncs the blueprint to the cluster in a generic (as much as Go allows) manner.
// Returns true if the object was created or updated, false if there was no change detected.
func (s *Syncer) Sync(ctx context.Context, owner metav1.Object, blueprint metav1.Object, diffOpts cmp.Option) (bool, runtime.Object, error) {
	blueprintObject, ok := blueprint.(runtime.Object)
	if !ok {
		return false, nil, fmt.Errorf("object %T is not a runtime.Object. Cannot sync it", blueprint)
	}

	key := client.ObjectKey{Name: blueprint.GetName(), Namespace: blueprint.GetNamespace()}

	actual := blueprintObject.DeepCopyObject()

	// use the caller-supplied context (the original used context.TODO() here, which
	// ignored cancellation of the surrounding reconcile). A NotFound error is expected
	// and just means we need to create the object.
	if getErr := s.client.Get(ctx, key, actual); getErr != nil {
		if !errors.IsNotFound(getErr) {
			return false, nil, getErr
		}
		actual = nil
	}

	if actual == nil {
		created, err := s.create(ctx, owner, key, blueprint)
		if err != nil {
			return false, created, err
		}
		return true, created, nil
	}

	return s.update(ctx, owner, actual, blueprint, diffOpts)
}
// Delete deletes the supplied object from the cluster. An object that does not exist
// (or disappears while we are deleting it) is not considered an error.
func (s *Syncer) Delete(ctx context.Context, object metav1.Object) error {
	ro, ok := object.(runtime.Object)
	if !ok {
		// error strings should start lowercase (staticcheck ST1005); the original was capitalized
		return fmt.Errorf("could not use the supplied object as kubernetes runtime object. That's unexpected: %s", object)
	}

	key := client.ObjectKey{Name: object.GetName(), Namespace: object.GetNamespace()}

	// only issue the delete when the object actually exists in the cluster
	err := s.client.Get(ctx, key, ro)
	if err == nil {
		err = s.client.Delete(ctx, ro)
	}

	if err != nil && !errors.IsNotFound(err) {
		return err
	}

	return nil
}
// create creates the blueprint object in the cluster (with the owner set as the
// controlling owner reference) and returns the object as loaded from the cluster.
func (s *Syncer) create(ctx context.Context, owner metav1.Object, key client.ObjectKey, blueprint metav1.Object) (runtime.Object, error) {
	blueprintObject, ok := blueprint.(runtime.Object)
	// NOTE: this check must come before any method call on blueprintObject. The original
	// read blueprintObject.GetObjectKind() first, which panics on the nil interface when
	// the type assertion fails, instead of returning this error.
	if !ok {
		return nil, fmt.Errorf("object %T is not a runtime.Object. Cannot sync it", blueprint)
	}
	kind := blueprintObject.GetObjectKind().GroupVersionKind().Kind
	actual := blueprintObject.DeepCopyObject()

	log.Info("Creating a new object", "kind", kind, "name", blueprint.GetName(), "namespace", blueprint.GetNamespace())

	obj, err := s.setOwnerReferenceAndConvertToRuntime(owner, blueprint)
	if err != nil {
		return nil, err
	}

	err = s.client.Create(ctx, obj)
	if err != nil {
		if !errors.IsAlreadyExists(err) {
			return nil, err
		}

		// ok, we got an already-exists error. So let's try to load the object into "actual".
		// if we fail this retry for whatever reason, just give up rather than retrying this in a loop...
		// the reconciliation loop will lead us here again in the next round.
		if err = s.client.Get(ctx, key, actual); err != nil {
			return nil, err
		}
	}

	return actual, nil
}
// update diffs the actual cluster object against the blueprint using the supplied diff
// options and, when they differ, brings the cluster object in line with the blueprint.
// Returns whether an in-place update happened, the resulting cluster object and any error.
func (s *Syncer) update(ctx context.Context, owner metav1.Object, actual runtime.Object, blueprint metav1.Object, diffOpts cmp.Option) (bool, runtime.Object, error) {
	actualMeta := actual.(metav1.Object)
	diff := cmp.Diff(actual, blueprint, diffOpts)
	if len(diff) > 0 {
		kind := actual.GetObjectKind().GroupVersionKind().Kind
		log.Info("Updating existing object", "kind", kind, "name", actualMeta.GetName(), "namespace", actualMeta.GetNamespace())
		// we need to handle labels and annotations specially in case the cluster admin has modified them.
		// if the current object in the cluster has the same annos/labels, they get overwritten with what's
		// in the blueprint. Any additional labels/annos on the object are kept though.
		targetLabels := map[string]string{}
		targetAnnos := map[string]string{}
		// start from what is currently on the cluster object...
		for k, v := range actualMeta.GetAnnotations() {
			targetAnnos[k] = v
		}
		for k, v := range actualMeta.GetLabels() {
			targetLabels[k] = v
		}
		// ...and let the blueprint's entries win on key collisions
		for k, v := range blueprint.GetAnnotations() {
			targetAnnos[k] = v
		}
		for k, v := range blueprint.GetLabels() {
			targetLabels[k] = v
		}
		// NOTE(review): this mutates the caller-supplied blueprint in place - confirm that
		// callers do not reuse the blueprint after the Sync call
		blueprint.SetAnnotations(targetAnnos)
		blueprint.SetLabels(targetLabels)
		if isUpdateUsingDeleteCreate(actual.GetObjectKind().GroupVersionKind().Kind) {
			// some kinds cannot be updated in place - delete and re-create them instead
			err := s.client.Delete(ctx, actual)
			if err != nil {
				return false, actual, err
			}
			key := client.ObjectKey{Name: actualMeta.GetName(), Namespace: actualMeta.GetNamespace()}
			obj, err := s.create(ctx, owner, key, blueprint)
			// NOTE(review): this path reports "false" even when the re-create succeeded,
			// unlike the in-place update below which reports "true" - confirm intent
			return false, obj, err
		} else {
			obj, err := s.setOwnerReferenceAndConvertToRuntime(owner, blueprint)
			if err != nil {
				return false, actual, err
			}
			// to be able to update, we need to set the resource version of the object that we know of
			obj.(metav1.Object).SetResourceVersion(actualMeta.GetResourceVersion())
			err = s.client.Update(ctx, obj)
			if err != nil {
				return false, obj, err
			}
			return true, obj, nil
		}
	}
	// no difference detected - leave the cluster object alone
	return false, actual, nil
}
// isUpdateUsingDeleteCreate tells whether objects of the given kind cannot be updated
// in place and therefore have to be deleted and re-created instead.
func isUpdateUsingDeleteCreate(kind string) bool {
	// Routes are not able to update the host, so we just need to re-create them...
	// ingresses and services have been identified to need this, too, for reasons that I don't know..
	switch kind {
	case "Service", "Ingress", "Route":
		return true
	default:
		return false
	}
}
// setOwnerReferenceAndConvertToRuntime marks the owner as the controlling owner of obj
// (unless owner is nil) and returns obj cast to a runtime.Object.
func (s *Syncer) setOwnerReferenceAndConvertToRuntime(owner metav1.Object, obj metav1.Object) (runtime.Object, error) {
	robj, ok := obj.(runtime.Object)
	if !ok {
		return nil, fmt.Errorf("object %T is not a runtime.Object. Cannot sync it", obj)
	}

	// a nil owner means the synced object is not owned by anything
	if owner == nil {
		return robj, nil
	}

	if err := controllerutil.SetControllerReference(owner, obj, s.scheme); err != nil {
		return nil, err
	}

	return robj, nil
}

View File

@ -0,0 +1,195 @@
package sync
import (
"context"
"reflect"
"testing"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
"github.com/google/go-cmp/cmp"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
var (
scheme = runtime.NewScheme()
)
// init prepares the test environment: fakes a Kubernetes infrastructure and registers
// the core/v1 types in the test scheme.
func init() {
	infrastructure.InitializeForTesting(infrastructure.Kubernetes)
	// the original discarded this error; fail fast instead so a broken scheme
	// registration doesn't surface as confusing failures in individual tests
	if err := corev1.AddToScheme(scheme); err != nil {
		panic(err)
	}
}
// TestSyncCreates verifies that Sync creates an object that does not exist yet and sets
// the owner reference on it.
func TestSyncCreates(t *testing.T) {
	preexisting := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "preexisting",
			Namespace: "default",
		},
	}
	// renamed from "new" - don't shadow the builtin
	blueprint := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "new",
			Namespace: "default",
		},
	}

	cl := fake.NewFakeClientWithScheme(scheme, preexisting)
	syncer := Syncer{client: cl, scheme: scheme}

	// the original ignored these errors, which could mask a failing sync
	if _, _, err := syncer.Sync(context.TODO(), preexisting, blueprint, cmp.Options{}); err != nil {
		t.Fatalf("Sync failed: %v", err)
	}

	synced := &corev1.Pod{}
	key := client.ObjectKey{Name: "new", Namespace: "default"}
	if err := cl.Get(context.TODO(), key, synced); err != nil {
		t.Fatalf("Failed to read back the synced object: %v", err)
	}

	if synced.Name != "new" {
		t.Error("The synced object should have the expected name")
	}
	if len(synced.OwnerReferences) == 0 {
		t.Fatal("There should have been an owner reference set")
	}
	if synced.OwnerReferences[0].Name != "preexisting" {
		t.Error("Unexpected owner reference")
	}
}
// TestSyncUpdates verifies that Sync updates a preexisting object: the owner reference
// is replaced by the new owner and the blueprint's labels are applied.
func TestSyncUpdates(t *testing.T) {
	preexisting := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "preexisting",
			Namespace: "default",
			OwnerReferences: []metav1.OwnerReference{
				{
					Name: "preexisting",
					Kind: "Pod",
				},
			},
		},
	}
	newOwner := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "newOwner",
			Namespace: "default",
		},
	}
	update := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "preexisting",
			Namespace: "default",
			Labels: map[string]string{
				"a": "b",
			},
		},
	}

	cl := fake.NewFakeClientWithScheme(scheme, preexisting)
	syncer := Syncer{client: cl, scheme: scheme}

	// the original ignored these errors, which could mask a failing sync
	if _, _, err := syncer.Sync(context.TODO(), newOwner, update, cmp.Options{}); err != nil {
		t.Fatalf("Sync failed: %v", err)
	}

	synced := &corev1.Pod{}
	key := client.ObjectKey{Name: "preexisting", Namespace: "default"}
	if err := cl.Get(context.TODO(), key, synced); err != nil {
		t.Fatalf("Failed to read back the synced object: %v", err)
	}

	if synced.Name != "preexisting" {
		t.Error("The synced object should have the expected name")
	}
	if len(synced.OwnerReferences) == 0 {
		t.Fatal("There should have been an owner reference set")
	}
	if synced.OwnerReferences[0].Name != "newOwner" {
		t.Error("Unexpected owner reference")
	}
	if len(synced.GetLabels()) == 0 {
		t.Fatal("There should have been labels on the synced object")
	}
	if synced.GetLabels()["a"] != "b" {
		t.Error("Unexpected label")
	}
}
// TestSyncKeepsAdditionalAnnosAndLabels verifies the label/annotation merge behavior of
// Sync: keys present in the blueprint overwrite the cluster values, while additional
// keys already on the cluster object are preserved.
func TestSyncKeepsAdditionalAnnosAndLabels(t *testing.T) {
	preexisting := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "preexisting",
			Namespace: "default",
			Labels: map[string]string{
				"a": "x",
				"k": "v",
			},
			Annotations: map[string]string{
				"a": "x",
				"k": "v",
			},
		},
	}
	owner := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "owner",
			Namespace: "default",
		},
	}
	update := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "preexisting",
			Namespace: "default",
			Labels: map[string]string{
				"a": "b",
				"c": "d",
			},
			Annotations: map[string]string{
				"a": "b",
				"c": "d",
			},
		},
	}

	cl := fake.NewFakeClientWithScheme(scheme, preexisting)
	syncer := Syncer{client: cl, scheme: scheme}

	// the original ignored these errors, which could mask a failing sync
	if _, _, err := syncer.Sync(context.TODO(), owner, update, cmp.Options{}); err != nil {
		t.Fatalf("Sync failed: %v", err)
	}

	synced := &corev1.Pod{}
	key := client.ObjectKey{Name: "preexisting", Namespace: "default"}
	if err := cl.Get(context.TODO(), key, synced); err != nil {
		t.Fatalf("Failed to read back the synced object: %v", err)
	}

	if synced.Name != "preexisting" {
		t.Error("The synced object should have the expected name")
	}

	// "a" overwritten by the blueprint, "k" kept from the cluster, "c" added
	expectedValues := map[string]string{
		"a": "b",
		"k": "v",
		"c": "d",
	}

	if !reflect.DeepEqual(expectedValues, synced.Labels) {
		t.Fatal("Unexpected labels on the synced object")
	}
	if !reflect.DeepEqual(expectedValues, synced.Annotations) {
		t.Fatal("Unexpected annotations on the synced object")
	}
}

4
go.mod
View File

@ -6,6 +6,8 @@ require (
github.com/Shopify/logrus-bugsnag v0.0.0-00010101000000-000000000000 // indirect
github.com/bitly/go-simplejson v0.0.0-00010101000000-000000000000 // indirect
github.com/che-incubator/kubernetes-image-puller-operator v0.0.0-20210428110012-14ef54b7dbf4
github.com/devfile/api/v2 v2.0.0-20210713124824-03e023e7078b
github.com/devfile/devworkspace-operator v0.2.1-0.20210805190010-9c55f69c461d
github.com/go-logr/logr v0.4.0
github.com/golang/mock v1.5.0
github.com/google/go-cmp v0.5.2
@ -16,7 +18,7 @@ require (
github.com/operator-framework/operator-lifecycle-manager v0.18.1
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/sirupsen/logrus v1.7.0
go.uber.org/zap v1.13.0
go.uber.org/zap v1.16.0
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect
k8s.io/api v0.21.1

27
go.sum
View File

@ -51,14 +51,17 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdko
github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alessio/shellescape v1.2.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/ant31/crd-validation v0.0.0-20180702145049-30f8a35d0ac2/go.mod h1:X0noFIik9YqfhGYBLEHg8LJKEwy7QIitLQuFMpKLcPk=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM=
github.com/aws/aws-sdk-go v0.0.0-20210122191723-2c7b39c8f2e2/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
@ -143,6 +146,10 @@ github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVz
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As=
github.com/denisenkom/go-mssqldb v0.0.0-20190204142019-df6d76eb9289/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc=
github.com/devfile/api/v2 v2.0.0-20210713124824-03e023e7078b h1:N00ORHA5iamvPKpDFfSAkAczAaCBvK8l0EzAphsgFSI=
github.com/devfile/api/v2 v2.0.0-20210713124824-03e023e7078b/go.mod h1:QNzaIVQnCsYfXed+QZOn1uvEQFzyhvpi/uc3g/b2ws0=
github.com/devfile/devworkspace-operator v0.2.1-0.20210805190010-9c55f69c461d h1:m0AhacO7IrwysBlLWDunITEkxITciGaO5e6uMN0t1XQ=
github.com/devfile/devworkspace-operator v0.2.1-0.20210805190010-9c55f69c461d/go.mod h1:Rfz7VVnXRpM4dT7UgMwV8zp6qHCggi39mBrN+i69pRo=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@ -173,6 +180,7 @@ github.com/edsrzf/mmap-go v0.0.0-20181215214921-188cc3b666ba/go.mod h1:YO35OhQPt
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20200129102538-a2fa14558f9a/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/envoyproxy/go-control-plane v0.0.0-20200213201256-ba8e577f987f/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
@ -196,10 +204,15 @@ github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc
github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/globalsign/mgo v0.0.0-20160323214708-72aab81a5dec/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M=
github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I=
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs=
@ -289,11 +302,14 @@ github.com/grpc-ecosystem/grpc-health-probe v0.3.2/go.mod h1:izVOQ4RWbjUR6lm4nn+
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
@ -335,6 +351,7 @@ github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwX
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869/go.mod h1:cJ6Cj7dQo+O6GJNiMx+Pa94qKj+TG8ONdKHgMNIyyag=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8=
@ -352,6 +369,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
@ -374,6 +392,7 @@ github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH
github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk=
github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao=
github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58=
github.com/lucasjones/reggen v0.0.0-20200904144131-37ba4fa293bb/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20200218084223-8edcc4e51f39/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/maorfr/helm-plugin-utils v0.0.0-20181205064038-588190cb5e3b/go.mod h1:p3gwmRSFqbWw6plBpR0sKl3n3vpu8kX70gvCJKMvvCA=
@ -406,6 +425,7 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309 h1:cvy4lBOYN3gKfKj8Lzz5Q9TfviP+L7koMHY7SvkyTKs=
github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
@ -427,6 +447,7 @@ github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h
github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/neo4j-drivers/gobolt v1.7.4/go.mod h1:O9AUbip4Dgre+CD3p40dnMD4a4r52QBIfblg5k7CTbE=
github.com/neo4j/neo4j-go-driver v1.7.4/go.mod h1:aPO0vVr+WnhEJne+FgFjfsjzAnssPFLucHgGZ76Zb/U=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ=
@ -487,6 +508,8 @@ github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULU
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI=
github.com/redhat-cop/operator-utils v0.1.0 h1:K0/A5bQS+7cl2mMk6cFaTlmcf1/cNepp6C5digjmysM=
github.com/redhat-cop/operator-utils v0.1.0/go.mod h1:K9f0vBA2bBiDyg9bsGDUojdwdhwUvHKX5QW0B+brWgo=
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
@ -497,6 +520,7 @@ github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
@ -534,6 +558,7 @@ github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmF
github.com/vmware/govmomi v0.0.0-20201221180647-1ec59a7c0002/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc=
github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk=
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@ -642,6 +667,7 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -776,6 +802,7 @@ gopkg.in/tomb.v1 v1.0.0-20161208151619-d5d1b5820637/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs=
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk=
gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

80
main.go
View File

@ -19,15 +19,21 @@ import (
"go.uber.org/zap/zapcore"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
"k8s.io/client-go/discovery"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
osruntime "runtime"
dwo_api "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
dwr "github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
"fmt"
"github.com/go-logr/logr"
@ -39,6 +45,8 @@ import (
checontroller "github.com/eclipse-che/che-operator/controllers/che"
backupcontroller "github.com/eclipse-che/che-operator/controllers/checlusterbackup"
restorecontroller "github.com/eclipse-che/che-operator/controllers/checlusterrestore"
"github.com/eclipse-che/che-operator/controllers/devworkspace"
"github.com/eclipse-che/che-operator/controllers/devworkspace/solver"
"github.com/eclipse-che/che-operator/pkg/deploy"
"github.com/eclipse-che/che-operator/pkg/signal"
"github.com/eclipse-che/che-operator/pkg/util"
@ -189,7 +197,11 @@ func main() {
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "e79b08a4.org.eclipse.che",
Namespace: watchNamespace,
// NOTE: We CANNOT limit the manager to a single namespace, because that would limit the
// devworkspace routing reconciler to a single namespace, which would make it totally unusable.
// Instead, if some controller wants to limit itself to single namespace, it can do it
// for example using an event filter, as checontroller does.
// Namespace: watchNamespace,
// TODO try to use it instead of signal handler....
// GracefulShutdownTimeout: ,
})
@ -198,13 +210,13 @@ func main() {
os.Exit(1)
}
cheReconciler, err := checontroller.NewReconciler(mgr)
cheReconciler, err := checontroller.NewReconciler(mgr, watchNamespace)
if err != nil {
setupLog.Error(err, "unable to create checluster reconciler")
os.Exit(1)
}
backupReconciler := backupcontroller.NewReconciler(mgr)
restoreReconciler := restorecontroller.NewReconciler(mgr)
backupReconciler := backupcontroller.NewReconciler(mgr, watchNamespace)
restoreReconciler := restorecontroller.NewReconciler(mgr, watchNamespace)
if err = cheReconciler.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to set up controller", "controller", "CheCluster")
@ -219,6 +231,11 @@ func main() {
os.Exit(1)
}
if err = enableDevworkspaceSupport(mgr); err != nil {
setupLog.Error(err, "unable to initialize devworkspace support")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
@ -238,3 +255,58 @@ func main() {
os.Exit(1)
}
}
// enableDevworkspaceSupport wires the DevWorkspace-related reconcilers into the
// given manager.
//
// It is deliberately non-fatal (returns nil) on infrastructures where the
// devworkspace operator cannot run, and it only registers the
// DevWorkspaceRouting reconciler when the controller.devfile.io API group is
// discoverable in the cluster. Any other failure is returned to the caller.
func enableDevworkspaceSupport(mgr manager.Manager) error {
	// DWO and DWCO use the infrastructure package for openshift detection. It needs to be initialized
	// but only supports OpenShift v4 or Kubernetes.
	if err := infrastructure.Initialize(); err != nil {
		// Not an error for the che-operator itself — devworkspace support just
		// stays disabled. Surface the reason so operators can tell why.
		setupLog.Info("devworkspace cannot run on this infrastructure", "reason", err.Error())
		return nil
	}

	// we install the devworkspace CheCluster reconciler even if dw is not supported so that it
	// can write meaningful status messages into the CheCluster CRs.
	dwChe := devworkspace.CheClusterReconciler{}
	if err := dwChe.SetupWithManager(mgr); err != nil {
		return err
	}

	// we only enable Devworkspace support, if there is the controller.devfile.io resource group in the cluster
	// we assume that if the group is there, then we have all the expected CRs there, too.
	cl, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
	if err != nil {
		return err
	}

	groups, err := cl.ServerGroups()
	if err != nil {
		return err
	}

	supported := false
	for _, g := range groups.Groups {
		if g.Name == "controller.devfile.io" {
			supported = true
			break
		}
	}

	if supported {
		if err := dwo_api.AddToScheme(mgr.GetScheme()); err != nil {
			return err
		}

		routing := dwr.DevWorkspaceRoutingReconciler{
			Client:       mgr.GetClient(),
			Log:          ctrl.Log.WithName("controllers").WithName("DevWorkspaceRouting"),
			Scheme:       mgr.GetScheme(),
			SolverGetter: solver.Getter(mgr.GetScheme()),
		}
		if err := routing.SetupWithManager(mgr); err != nil {
			return err
		}
	}
	return nil
}

View File

@ -28,7 +28,6 @@ init() {
FORCE_UPDATE=""
BUILDX_PLATFORMS="linux/amd64,linux/ppc64le"
DEV_WORKSPACE_CONTROLLER_VERSION="main"
DEV_WORKSPACE_CHE_OPERATOR_VERSION="main"
STABLE_CHANNELS=("stable-all-namespaces" "stable")
if [[ $# -lt 1 ]]; then usage; exit; fi
@ -43,7 +42,6 @@ init() {
'--check-resources') CHECK_RESOURCES=true; shift 0;;
'--prepare-community-operators-update') PREPARE_COMMUNITY_OPERATORS_UPDATE=true; shift 0;;
'--dev-workspace-controller-version') DEV_WORKSPACE_CONTROLLER_VERSION=$2; shift 1;;
'--dev-workspace-che-operator-version') DEV_WORKSPACE_CHE_OPERATOR_VERSION=$2; shift 1;;
'--force') FORCE_UPDATE="--force"; shift 0;;
'--help'|'-h') usage; exit;;
esac
@ -133,10 +131,6 @@ if ! grep -q "value: quay.io/eclipse/che-dashboard:$RELEASE" $filename; then
# use ${RELEASE} instead of master
wget https://raw.githubusercontent.com/eclipse-che/che-server/${RELEASE}/assembly/assembly-wsmaster-war/src/main/webapp/WEB-INF/classes/che/che.properties -q -O /tmp/che.properties
if ! grep -q "value: quay.io/che-incubator/devworkspace-che-operator:$DEV_WORKSPACE_CHE_OPERATOR_VERSION" $filename; then
echo "[ERROR] Unable to find devworkspace che operator image with version ${DEV_WORKSPACE_CHE_OPERATOR_VERSION} in the $filename"; exit 1
fi
plugin_broker_meta_image=$(cat /tmp/che.properties | grep che.workspace.plugin_broker.metadata.image | cut -d '=' -f2)
if ! grep -q "value: $plugin_broker_meta_image" $filename; then
echo "[ERROR] Unable to find plugin broker meta image '$plugin_broker_meta_image' in the $filename"; exit 1
@ -171,7 +165,7 @@ releaseOperatorCode() {
docker login quay.io -u "${QUAY_ECLIPSE_CHE_USERNAME}" -p "${QUAY_ECLIPSE_CHE_PASSWORD}"
echo "[INFO] releaseOperatorCode :: Build operator image in platforms: $BUILDX_PLATFORMS"
docker buildx build --build-arg DEV_WORKSPACE_CONTROLLER_VERSION=${DEV_WORKSPACE_CONTROLLER_VERSION} --build-arg DEV_WORKSPACE_CHE_OPERATOR_VERSION=${DEV_WORKSPACE_CHE_OPERATOR_VERSION} --platform "$BUILDX_PLATFORMS" --push -t "quay.io/eclipse/che-operator:${RELEASE}" .
docker buildx build --build-arg DEV_WORKSPACE_CONTROLLER_VERSION=${DEV_WORKSPACE_CONTROLLER_VERSION} --platform "$BUILDX_PLATFORMS" --push -t "quay.io/eclipse/che-operator:${RELEASE}" .
}
replaceImagesTags() {
@ -180,7 +174,6 @@ replaceImagesTags() {
lastDefaultCheServerImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_che_server\") | .value" "${OPERATOR_YAML}")
lastDefaultDashboardImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_dashboard\") | .value" "${OPERATOR_YAML}")
lastDefaultDevWorkspaceControllerImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_devworkspace_controller\") | .value" "${OPERATOR_YAML}")
lastDefaultDevWorkspaceCheOperatorImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_devworkspace_che_operator\") | .value" "${OPERATOR_YAML}")
lastDefaultKeycloakImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_keycloak\") | .value" "${OPERATOR_YAML}")
lastDefaultPluginRegistryImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_plugin_registry\") | .value" "${OPERATOR_YAML}")
lastDefaultDevfileRegistryImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_devfile_registry\") | .value" "${OPERATOR_YAML}")
@ -188,7 +181,6 @@ replaceImagesTags() {
CHE_SERVER_IMAGE_REALEASE=$(replaceTag "${lastDefaultCheServerImage}" "${RELEASE}")
DASHBOARD_IMAGE_REALEASE=$(replaceTag "${lastDefaultDashboardImage}" "${RELEASE}")
DEVWORKSPACE_CONTROLLER_IMAGE_RELEASE=$(replaceTag "${lastDefaultDevWorkspaceControllerImage}" "${DEV_WORKSPACE_CONTROLLER_VERSION}")
DEVWORKSPACE_CHE_OPERATOR_IMAGE_RELEASE=$(replaceTag "${lastDefaultDevWorkspaceCheOperatorImage}" "${DEV_WORKSPACE_CHE_OPERATOR_VERSION}")
KEYCLOAK_IMAGE_RELEASE=$(replaceTag "${lastDefaultKeycloakImage}" "${RELEASE}")
PLUGIN_REGISTRY_IMAGE_RELEASE=$(replaceTag "${lastDefaultPluginRegistryImage}" "${RELEASE}")
DEVFILE_REGISTRY_IMAGE_RELEASE=$(replaceTag "${lastDefaultDevfileRegistryImage}" "${RELEASE}")
@ -203,11 +195,9 @@ replaceImagesTags() {
yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_che_server\") | .value ) = \"${CHE_SERVER_IMAGE_REALEASE}\"" | \
yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_dashboard\") | .value ) = \"${DASHBOARD_IMAGE_REALEASE}\"" | \
yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_devworkspace_controller\") | .value ) = \"${DEVWORKSPACE_CONTROLLER_IMAGE_RELEASE}\"" | \
yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_devworkspace_che_operator\") | .value ) = \"${DEVWORKSPACE_CHE_OPERATOR_IMAGE_RELEASE}\"" | \
yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_keycloak\") | .value ) = \"${KEYCLOAK_IMAGE_RELEASE}\"" | \
yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_plugin_registry\") | .value ) = \"${PLUGIN_REGISTRY_IMAGE_RELEASE}\"" | \
yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_devfile_registry\") | .value ) = \"${DEVFILE_REGISTRY_IMAGE_RELEASE}\"" | \
yq -ryY "( .spec.template.spec.containers[] | select(.name == \"devworkspace-che-operator\") | .image ) = \"quay.io/che-incubator/devworkspace-che-operator:${DEV_WORKSPACE_CHE_OPERATOR_VERSION}\"" \
>> "${NEW_OPERATOR_YAML}"
mv "${NEW_OPERATOR_YAML}" "${OPERATOR_YAML}"
}
@ -230,7 +220,7 @@ releaseOlmFiles() {
for channel in "${STABLE_CHANNELS[@]}"
do
cd $RELEASE_DIR/olm
. release-olm-files.sh --release-version $RELEASE --channel $channel --dev-workspace-controller-version $DEV_WORKSPACE_CONTROLLER_VERSION --dev-workspace-che-operator-version $DEV_WORKSPACE_CHE_OPERATOR_VERSION
. release-olm-files.sh --release-version $RELEASE --channel $channel --dev-workspace-controller-version $DEV_WORKSPACE_CONTROLLER_VERSION
cd $RELEASE_DIR
local openshift=$RELEASE_DIR/bundle/$channel/eclipse-che-preview-openshift/manifests

View File

@ -61,9 +61,6 @@ setImagesFromDeploymentEnv
setOperatorImage
echo "${OPERATOR_IMAGE}"
setDevWorkspaceCheOperatorImage
echo ${DEVWORKSPACE_CHE_OPERATOR_IMAGE}
setPluginRegistryList
echo "${PLUGIN_REGISTRY_LIST}"
@ -130,7 +127,6 @@ rm -Rf "${DIGEST_FILE}"
touch "${DIGEST_FILE}"
writeDigest "${OPERATOR_IMAGE}" "operator-image"
writeDigest "${DEVWORKSPACE_CHE_OPERATOR_IMAGE}" "devworkspace-che-operator-image"
for image in ${REQUIRED_IMAGES}; do
writeDigest "${image}" "required-image"

View File

@ -18,10 +18,6 @@ setOperatorImage() {
OPERATOR_IMAGE=$(yq -r '.spec.install.spec.deployments[].spec.template.spec.containers[0].image' "${CSV}")
}
# Reads the devworkspace-che-operator image from the CSV deployment spec into
# DEVWORKSPACE_CHE_OPERATOR_IMAGE.
# NOTE(review): this relies on the devworkspace-che-operator being the container
# at index 1 of the deployment in ${CSV} — verify against the bundle layout if
# the deployment spec ever changes.
setDevWorkspaceCheOperatorImage() {
DEVWORKSPACE_CHE_OPERATOR_IMAGE=$(yq -r '.spec.install.spec.deployments[].spec.template.spec.containers[1].image' "${CSV}")
}
setPluginRegistryList() {
registry=$(yq -r '.spec.install.spec.deployments[].spec.template.spec.containers[].env[] | select(.name | test("RELATED_IMAGE_.*plugin_registry"; "g")) | .value' "${CSV}")
setRegistryImages "${registry}"

View File

@ -17,7 +17,6 @@ while [[ "$#" -gt 0 ]]; do
'--release-version') RELEASE=$2; shift 1;;
'--channel') CHANNEL=$2; shift 1;;
'--dev-workspace-controller-version') DEV_WORKSPACE_CONTROLLER_VERSION=$2; shift 1;;
'--dev-workspace-che-operator-version') DEV_WORKSPACE_CHE_OPERATOR_VERSION=$2; shift 1;;
esac
shift 1
done
@ -82,7 +81,6 @@ if [[ -z "$RELEASE" ]] || [[ -z "$RELEASE" ]] || [[ -z "$RELEASE" ]]; then
echo "One of the following required parameters is missing"
echo "--release-version $RELEASE"
echo "--dev-workspace-controller-version $DEV_WORKSPACE_CONTROLLER_VERSION"
echo "--dev-workspace-che-operator-version $DEV_WORKSPACE_CHE_OPERATOR_VERSION"
exit 1
fi
@ -118,7 +116,6 @@ do
-e 's/imagePullPolicy: *Always/imagePullPolicy: IfNotPresent/' \
-e 's/"cheImageTag": *"next"/"cheImageTag": ""/' \
-e 's|quay.io/eclipse/che-dashboard:next|quay.io/eclipse/che-dashboard:'${RELEASE}'|' \
-e 's|quay.io/che-incubator/devworkspace-che-operator:next|quay.io/che-incubator/devworkspace-che-operator:'${DEV_WORKSPACE_CHE_OPERATOR_VERSION}'|' \
-e 's|quay.io/devfile/devworkspace-controller:next|quay.io/devfile/devworkspace-controller:'${DEV_WORKSPACE_CONTROLLER_VERSION}'|' \
-e 's|"identityProviderImage": *"quay.io/eclipse/che-keycloak:next"|"identityProviderImage": ""|' \
-e 's|"devfileRegistryImage": *"quay.io/eclipse/che-devfile-registry:next"|"devfileRegistryImage": ""|' \

View File

@ -30,7 +30,6 @@ var (
defaultCheServerImage string
defaultCheVersion string
defaultDashboardImage string
defaultDevworkspaceCheOperatorImage string
defaultDevworkspaceControllerImage string
defaultPluginRegistryImage string
defaultDevfileRegistryImage string
@ -176,7 +175,6 @@ func InitDefaultsFromFile(defaultsPath string) {
defaultCheVersion = util.GetDeploymentEnv(operatorDeployment, "CHE_VERSION")
defaultCheServerImage = util.GetDeploymentEnv(operatorDeployment, util.GetArchitectureDependentEnv("RELATED_IMAGE_che_server"))
defaultDashboardImage = util.GetDeploymentEnv(operatorDeployment, util.GetArchitectureDependentEnv("RELATED_IMAGE_dashboard"))
defaultDevworkspaceCheOperatorImage = util.GetDeploymentEnv(operatorDeployment, util.GetArchitectureDependentEnv("RELATED_IMAGE_devworkspace_che_operator"))
defaultDevworkspaceControllerImage = util.GetDeploymentEnv(operatorDeployment, util.GetArchitectureDependentEnv("RELATED_IMAGE_devworkspace_controller"))
defaultPluginRegistryImage = util.GetDeploymentEnv(operatorDeployment, util.GetArchitectureDependentEnv("RELATED_IMAGE_plugin_registry"))
defaultDevfileRegistryImage = util.GetDeploymentEnv(operatorDeployment, util.GetArchitectureDependentEnv("RELATED_IMAGE_devfile_registry"))
@ -302,10 +300,6 @@ func DefaultDevworkspaceControllerImage(cr *orgv1.CheCluster) string {
return patchDefaultImageName(cr, defaultDevworkspaceControllerImage)
}
func DefaultDevworkspaceCheOperatorImage(cr *orgv1.CheCluster) string {
return patchDefaultImageName(cr, defaultDevworkspaceCheOperatorImage)
}
func DefaultKeycloakImage(cr *orgv1.CheCluster) string {
return patchDefaultImageName(cr, defaultKeycloakImage)
}
@ -457,7 +451,6 @@ func InitDefaultsFromEnv() {
defaultCheVersion = getDefaultFromEnv("CHE_VERSION")
defaultCheServerImage = getDefaultFromEnv(util.GetArchitectureDependentEnv("RELATED_IMAGE_che_server"))
defaultDashboardImage = getDefaultFromEnv(util.GetArchitectureDependentEnv("RELATED_IMAGE_dashboard"))
defaultDevworkspaceCheOperatorImage = getDefaultFromEnv(util.GetArchitectureDependentEnv("RELATED_IMAGE_devworkspace_che_operator"))
defaultDevworkspaceControllerImage = getDefaultFromEnv(util.GetArchitectureDependentEnv("RELATED_IMAGE_devworkspace_controller"))
defaultPluginRegistryImage = getDefaultFromEnv(util.GetArchitectureDependentEnv("RELATED_IMAGE_plugin_registry"))
defaultDevfileRegistryImage = getDefaultFromEnv(util.GetArchitectureDependentEnv("RELATED_IMAGE_devfile_registry"))

View File

@ -17,6 +17,7 @@ import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
orgv1 "github.com/eclipse-che/che-operator/api/v1"
@ -46,17 +47,13 @@ var (
DevWorkspaceDeploymentName = "devworkspace-controller-manager"
SubscriptionResourceName = "subscriptions"
CheManagerResourcename = "chemanagers"
ClusterServiceVersionResourceName = "clusterserviceversions"
DevWorkspaceCSVNameWithouVersion = "devworkspace-operator"
OpenshiftDevWorkspaceTemplatesPath = "/tmp/devworkspace-operator/templates/deployment/openshift/objects"
OpenshiftDevWorkspaceCheTemplatesPath = "/tmp/devworkspace-che-operator/templates/deployment/openshift/objects"
KubernetesDevWorkspaceTemplatesPath = "/tmp/devworkspace-operator/templates/deployment/kubernetes/objects"
KubernetesDevWorkspaceCheTemplatesPath = "/tmp/devworkspace-che-operator/templates/deployment/kubernetes/objects"
OpenshiftDevWorkspaceTemplatesPath = "/tmp/devworkspace-operator/templates/deployment/openshift/objects"
KubernetesDevWorkspaceTemplatesPath = "/tmp/devworkspace-operator/templates/deployment/kubernetes/objects"
DevWorkspaceTemplates = devWorkspaceTemplatesPath()
DevWorkspaceCheTemplates = devWorkspaceCheTemplatesPath()
DevWorkspaceTemplates = devWorkspaceTemplatesPath()
DevWorkspaceServiceAccountFile = DevWorkspaceTemplates + "/devworkspace-controller-serviceaccount.ServiceAccount.yaml"
DevWorkspaceRoleFile = DevWorkspaceTemplates + "/devworkspace-controller-leader-election-role.Role.yaml"
@ -108,6 +105,13 @@ var (
syncDwConfigMap,
syncDwDeployment,
}
// Exits the operator after successful fresh installation of the devworkspace.
// Can be replaced with something less drastic (especially useful in tests)
afterInstall = func() {
logrus.Warn("Exitting the operator after DevWorkspace installation. DevWorkspace support will be initialized on the next start.")
os.Exit(1)
}
)
func ReconcileDevWorkspace(deployContext *deploy.DeployContext) (bool, error) {
@ -150,14 +154,21 @@ func ReconcileDevWorkspace(deployContext *deploy.DeployContext) (bool, error) {
}
for _, syncItem := range syncItems {
done, err := syncItem(deployContext)
_, err := syncItem(deployContext)
if !util.IsTestMode() {
if !done {
if err != nil {
return false, err
}
}
}
if !devWorkspaceWebhookExists && !util.IsTestMode() {
// the webhook did not exist in the cluster
// this means that we're installing devworkspace and therefore need to restart
// so that devworkspace support can initialize during the operator startup
afterInstall()
}
return true, nil
}
@ -203,7 +214,7 @@ func checkWebTerminalSubscription(deployContext *deploy.DeployContext) error {
return err
}
return errors.New("A non matching version of the Dev Workspace operator is already installed")
return errors.New("a non matching version of the Dev Workspace operator is already installed")
}
func createDwNamespace(deployContext *deploy.DeployContext) (bool, error) {
@ -464,10 +475,3 @@ func devWorkspaceTemplatesPath() string {
}
return KubernetesDevWorkspaceTemplatesPath
}
// devWorkspaceCheTemplatesPath returns the filesystem path of the
// devworkspace-che-operator deployment templates matching the current
// platform (OpenShift vs. plain Kubernetes).
func devWorkspaceCheTemplatesPath() string {
	if !util.IsOpenShift {
		return KubernetesDevWorkspaceCheTemplatesPath
	}
	return OpenshiftDevWorkspaceCheTemplatesPath
}

View File

@ -116,14 +116,6 @@ func TestReconcileDevWorkspace(t *testing.T) {
t.Run(testCase.name, func(t *testing.T) {
deployContext := deploy.GetTestDeployContext(testCase.cheCluster, []runtime.Object{})
deployContext.ClusterAPI.Scheme.AddKnownTypes(operatorsv1alpha1.SchemeGroupVersion, &operatorsv1alpha1.Subscription{})
deployContext.ClusterAPI.Scheme.AddKnownTypes(operatorsv1alpha1.SchemeGroupVersion, &operatorsv1alpha1.ClusterServiceVersion{})
deployContext.ClusterAPI.DiscoveryClient.(*fakeDiscovery.FakeDiscovery).Fake.Resources = []*metav1.APIResourceList{
{
APIResources: []metav1.APIResource{
{Name: CheManagerResourcename},
},
},
}
util.IsOpenShift = testCase.IsOpenShift
util.IsOpenShift4 = testCase.IsOpenShift4
@ -183,7 +175,7 @@ func TestReconcileDevWorkspaceShouldThrowErrorIfWebTerminalSubscriptionExists(t
util.IsOpenShift4 = true
_, err := ReconcileDevWorkspace(deployContext)
if err == nil || err.Error() != "A non matching version of the Dev Workspace operator is already installed" {
if err == nil || err.Error() != "a non matching version of the Dev Workspace operator is already installed" {
t.Fatalf("Error should be thrown")
}
}

View File

@ -26,6 +26,8 @@ import (
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/remotecommand"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
type k8s struct {
@ -188,3 +190,20 @@ func (cl *k8s) IsResourceOperationPermitted(resourceAttr *authorizationv1.Resour
return ssar.Status.Allowed, nil
}
// InNamespaceEventFilter returns a predicate that only lets through events
// originating from objects in the provided namespace.
func InNamespaceEventFilter(namespace string) predicate.Predicate {
	// Shared comparison so all four event handlers stay in sync.
	inWatchedNamespace := func(objNamespace string) bool {
		return objNamespace == namespace
	}

	return predicate.Funcs{
		CreateFunc: func(e event.CreateEvent) bool {
			return inWatchedNamespace(e.Meta.GetNamespace())
		},
		DeleteFunc: func(e event.DeleteEvent) bool {
			return inWatchedNamespace(e.Meta.GetNamespace())
		},
		UpdateFunc: func(e event.UpdateEvent) bool {
			// Update events carry old/new metadata; filter on the new object.
			return inWatchedNamespace(e.MetaNew.GetNamespace())
		},
		GenericFunc: func(e event.GenericEvent) bool {
			return inWatchedNamespace(e.Meta.GetNamespace())
		},
	}
}

277
vendor/github.com/devfile/api/v2/LICENSE generated vendored Normal file
View File

@ -0,0 +1,277 @@
Eclipse Public License - v 2.0
THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE
PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
1. DEFINITIONS
"Contribution" means:
a) in the case of the initial Contributor, the initial content
Distributed under this Agreement, and
b) in the case of each subsequent Contributor:
i) changes to the Program, and
ii) additions to the Program;
where such changes and/or additions to the Program originate from
and are Distributed by that particular Contributor. A Contribution
"originates" from a Contributor if it was added to the Program by
such Contributor itself or anyone acting on such Contributor's behalf.
Contributions do not include changes or additions to the Program that
are not Modified Works.
"Contributor" means any person or entity that Distributes the Program.
"Licensed Patents" mean patent claims licensable by a Contributor which
are necessarily infringed by the use or sale of its Contribution alone
or when combined with the Program.
"Program" means the Contributions Distributed in accordance with this
Agreement.
"Recipient" means anyone who receives the Program under this Agreement
or any Secondary License (as applicable), including Contributors.
"Derivative Works" shall mean any work, whether in Source Code or other
form, that is based on (or derived from) the Program and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship.
"Modified Works" shall mean any work in Source Code or other form that
results from an addition to, deletion from, or modification of the
contents of the Program, including, for purposes of clarity any new file
in Source Code form that contains any contents of the Program. Modified
Works shall not include works that contain only declarations,
interfaces, types, classes, structures, or files of the Program solely
in each case in order to link to, bind by name, or subclass the Program
or Modified Works thereof.
"Distribute" means the acts of a) distributing or b) making available
in any manner that enables the transfer of a copy.
"Source Code" means the form of a Program preferred for making
modifications, including but not limited to software source code,
documentation source, and configuration files.
"Secondary License" means either the GNU General Public License,
Version 2.0, or any later versions of that license, including any
exceptions or additional permissions as identified by the initial
Contributor.
2. GRANT OF RIGHTS
a) Subject to the terms of this Agreement, each Contributor hereby
grants Recipient a non-exclusive, worldwide, royalty-free copyright
license to reproduce, prepare Derivative Works of, publicly display,
publicly perform, Distribute and sublicense the Contribution of such
Contributor, if any, and such Derivative Works.
b) Subject to the terms of this Agreement, each Contributor hereby
grants Recipient a non-exclusive, worldwide, royalty-free patent
license under Licensed Patents to make, use, sell, offer to sell,
import and otherwise transfer the Contribution of such Contributor,
if any, in Source Code or other form. This patent license shall
apply to the combination of the Contribution and the Program if, at
the time the Contribution is added by the Contributor, such addition
of the Contribution causes such combination to be covered by the
Licensed Patents. The patent license shall not apply to any other
combinations which include the Contribution. No hardware per se is
licensed hereunder.
c) Recipient understands that although each Contributor grants the
licenses to its Contributions set forth herein, no assurances are
provided by any Contributor that the Program does not infringe the
patent or other intellectual property rights of any other entity.
Each Contributor disclaims any liability to Recipient for claims
brought by any other entity based on infringement of intellectual
property rights or otherwise. As a condition to exercising the
rights and licenses granted hereunder, each Recipient hereby
assumes sole responsibility to secure any other intellectual
property rights needed, if any. For example, if a third party
patent license is required to allow Recipient to Distribute the
Program, it is Recipient's responsibility to acquire that license
before distributing the Program.
d) Each Contributor represents that to its knowledge it has
sufficient copyright rights in its Contribution, if any, to grant
the copyright license set forth in this Agreement.
e) Notwithstanding the terms of any Secondary License, no
Contributor makes additional grants to any Recipient (other than
those set forth in this Agreement) as a result of such Recipient's
receipt of the Program under the terms of a Secondary License
(if permitted under the terms of Section 3).
3. REQUIREMENTS
3.1 If a Contributor Distributes the Program in any form, then:
a) the Program must also be made available as Source Code, in
accordance with section 3.2, and the Contributor must accompany
the Program with a statement that the Source Code for the Program
is available under this Agreement, and informs Recipients how to
obtain it in a reasonable manner on or through a medium customarily
used for software exchange; and
b) the Contributor may Distribute the Program under a license
different than this Agreement, provided that such license:
i) effectively disclaims on behalf of all other Contributors all
warranties and conditions, express and implied, including
warranties or conditions of title and non-infringement, and
implied warranties or conditions of merchantability and fitness
for a particular purpose;
ii) effectively excludes on behalf of all other Contributors all
liability for damages, including direct, indirect, special,
incidental and consequential damages, such as lost profits;
iii) does not attempt to limit or alter the recipients' rights
in the Source Code under section 3.2; and
iv) requires any subsequent distribution of the Program by any
party to be under a license that satisfies the requirements
of this section 3.
3.2 When the Program is Distributed as Source Code:
a) it must be made available under this Agreement, or if the
Program (i) is combined with other material in a separate file or
files made available under a Secondary License, and (ii) the initial
Contributor attached to the Source Code the notice described in
Exhibit A of this Agreement, then the Program may be made available
under the terms of such Secondary Licenses, and
b) a copy of this Agreement must be included with each copy of
the Program.
3.3 Contributors may not remove or alter any copyright, patent,
trademark, attribution notices, disclaimers of warranty, or limitations
of liability ("notices") contained within the Program from any copy of
the Program which they Distribute, provided that Contributors may add
their own appropriate notices.
4. COMMERCIAL DISTRIBUTION
Commercial distributors of software may accept certain responsibilities
with respect to end users, business partners and the like. While this
license is intended to facilitate the commercial use of the Program,
the Contributor who includes the Program in a commercial product
offering should do so in a manner which does not create potential
liability for other Contributors. Therefore, if a Contributor includes
the Program in a commercial product offering, such Contributor
("Commercial Contributor") hereby agrees to defend and indemnify every
other Contributor ("Indemnified Contributor") against any losses,
damages and costs (collectively "Losses") arising from claims, lawsuits
and other legal actions brought by a third party against the Indemnified
Contributor to the extent caused by the acts or omissions of such
Commercial Contributor in connection with its distribution of the Program
in a commercial product offering. The obligations in this section do not
apply to any claims or Losses relating to any actual or alleged
intellectual property infringement. In order to qualify, an Indemnified
Contributor must: a) promptly notify the Commercial Contributor in
writing of such claim, and b) allow the Commercial Contributor to control,
and cooperate with the Commercial Contributor in, the defense and any
related settlement negotiations. The Indemnified Contributor may
participate in any such claim at its own expense.
For example, a Contributor might include the Program in a commercial
product offering, Product X. That Contributor is then a Commercial
Contributor. If that Commercial Contributor then makes performance
claims, or offers warranties related to Product X, those performance
claims and warranties are such Commercial Contributor's responsibility
alone. Under this section, the Commercial Contributor would have to
defend claims against the other Contributors related to those performance
claims and warranties, and if a court requires any other Contributor to
pay any damages as a result, the Commercial Contributor must pay
those damages.
5. NO WARRANTY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT
PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS"
BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR
IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF
TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR
PURPOSE. Each Recipient is solely responsible for determining the
appropriateness of using and distributing the Program and assumes all
risks associated with its exercise of rights under this Agreement,
including but not limited to the risks and costs of program errors,
compliance with applicable laws, damage to or loss of data, programs
or equipment, and unavailability or interruption of operations.
6. DISCLAIMER OF LIABILITY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT
PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS
SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST
PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE
EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
7. GENERAL
If any provision of this Agreement is invalid or unenforceable under
applicable law, it shall not affect the validity or enforceability of
the remainder of the terms of this Agreement, and without further
action by the parties hereto, such provision shall be reformed to the
minimum extent necessary to make such provision valid and enforceable.
If Recipient institutes patent litigation against any entity
(including a cross-claim or counterclaim in a lawsuit) alleging that the
Program itself (excluding combinations of the Program with other software
or hardware) infringes such Recipient's patent(s), then such Recipient's
rights granted under Section 2(b) shall terminate as of the date such
litigation is filed.
All Recipient's rights under this Agreement shall terminate if it
fails to comply with any of the material terms or conditions of this
Agreement and does not cure such failure in a reasonable period of
time after becoming aware of such noncompliance. If all Recipient's
rights under this Agreement terminate, Recipient agrees to cease use
and distribution of the Program as soon as reasonably practicable.
However, Recipient's obligations under this Agreement and any licenses
granted by Recipient relating to the Program shall continue and survive.
Everyone is permitted to copy and distribute copies of this Agreement,
but in order to avoid inconsistency the Agreement is copyrighted and
may only be modified in the following manner. The Agreement Steward
reserves the right to publish new versions (including revisions) of
this Agreement from time to time. No one other than the Agreement
Steward has the right to modify this Agreement. The Eclipse Foundation
is the initial Agreement Steward. The Eclipse Foundation may assign the
responsibility to serve as the Agreement Steward to a suitable separate
entity. Each new version of the Agreement will be given a distinguishing
version number. The Program (including Contributions) may always be
Distributed subject to the version of the Agreement under which it was
received. In addition, after a new version of the Agreement is published,
Contributor may elect to Distribute the Program (including its
Contributions) under the new version.
Except as expressly stated in Sections 2(a) and 2(b) above, Recipient
receives no rights or licenses to the intellectual property of any
Contributor under this Agreement, whether expressly, by implication,
estoppel or otherwise. All rights in the Program not expressly granted
under this Agreement are reserved. Nothing in this Agreement is intended
to be enforceable by any entity that is not a Contributor or Recipient.
No third-party beneficiary rights are created under this Agreement.
Exhibit A - Form of Secondary Licenses Notice
"This Source Code may also be made available under the following
Secondary Licenses when the conditions for such availability set forth
in the Eclipse Public License, v. 2.0 are satisfied: {name license(s),
version(s), and exceptions or additional permissions here}."
Simply including a copy of this Agreement, including this Exhibit A
is not sufficient to license the Source Code under Secondary Licenses.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to
look for such a notice.
You may add additional accurate notices of copyright ownership.

View File

@ -0,0 +1,182 @@
package v1alpha2
import (
attributes "github.com/devfile/api/v2/pkg/attributes"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// CommandType describes the type of command.
// Only one of the following command type may be specified.
// +kubebuilder:validation:Enum=Exec;Apply;Composite;Custom
type CommandType string

// The command types supported by the CommandUnion discriminator.
const (
	ExecCommandType      CommandType = "Exec"
	ApplyCommandType     CommandType = "Apply"
	CompositeCommandType CommandType = "Composite"
	CustomCommandType    CommandType = "Custom"
)

// CommandGroupKind describes the kind of command group.
// +kubebuilder:validation:Enum=build;run;test;debug
type CommandGroupKind string

// The supported command group kinds.
const (
	BuildCommandGroupKind CommandGroupKind = "build"
	RunCommandGroupKind   CommandGroupKind = "run"
	TestCommandGroupKind  CommandGroupKind = "test"
	DebugCommandGroupKind CommandGroupKind = "debug"
)
// CommandGroup qualifies a command as a member of a broader group
// (build, run, test or debug).
type CommandGroup struct {
	// Kind of group the command is part of
	Kind CommandGroupKind `json:"kind"`
	// +optional
	// Identifies the default command for a given group kind
	IsDefault bool `json:"isDefault,omitempty"`
}

// BaseCommand holds the fields shared by all command variants.
type BaseCommand struct {
	// +optional
	// Defines the group this command is part of
	Group *CommandGroup `json:"group,omitempty"`
}

// LabeledCommand is a BaseCommand with an optional human-readable label.
type LabeledCommand struct {
	BaseCommand `json:",inline"`
	// +optional
	// Optional label that provides a label for this command
	// to be used in Editor UI menus for example
	Label string `json:"label,omitempty"`
}
// Command is a single devworkspace command, identified by a unique id,
// whose concrete behavior is given by the inlined CommandUnion.
type Command struct {
	// Mandatory identifier that allows referencing
	// this command in composite commands, from
	// a parent, or in events.
	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
	// +kubebuilder:validation:MaxLength=63
	Id string `json:"id"`
	// Map of implementation-dependant free-form YAML attributes.
	// +optional
	// +kubebuilder:validation:Type=object
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Attributes attributes.Attributes `json:"attributes,omitempty"`
	CommandUnion `json:",inline"`
}

// CommandUnion is the discriminated union of all supported command kinds;
// exactly one member is expected to be set, matching CommandType.
// +union
type CommandUnion struct {
	// Type of devworkspace command
	// +unionDiscriminator
	// +optional
	CommandType CommandType `json:"commandType,omitempty"`
	// CLI Command executed in an existing component container
	// +optional
	Exec *ExecCommand `json:"exec,omitempty"`
	// Command that consists in applying a given component definition,
	// typically bound to a devworkspace event.
	//
	// For example, when an `apply` command is bound to a `preStart` event,
	// and references a `container` component, it will start the container as a
	// K8S initContainer in the devworkspace POD, unless the component has its
	// `dedicatedPod` field set to `true`.
	//
	// When no `apply` command exists for a given component,
	// it is assumed the component will be applied at devworkspace start
	// by default.
	// +optional
	Apply *ApplyCommand `json:"apply,omitempty"`
	// Composite command that allows executing several sub-commands
	// either sequentially or concurrently
	// +optional
	Composite *CompositeCommand `json:"composite,omitempty"`
	// Custom command whose logic is implementation-dependant
	// and should be provided by the user
	// possibly through some dedicated plugin
	// +optional
	// +devfile:overrides:include:omit=true
	Custom *CustomCommand `json:"custom,omitempty"`
}
// ExecCommand is a CLI command executed in an existing component container.
type ExecCommand struct {
	LabeledCommand `json:",inline"`
	// The actual command-line string
	//
	// Special variables that can be used:
	//
	//  - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping.
	//
	//  - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/<project-name>). If there are multiple projects, this will point to the directory of the first one.
	CommandLine string `json:"commandLine"`
	// Describes component to which given action relates
	//
	Component string `json:"component"`
	// Working directory where the command should be executed
	//
	// Special variables that can be used:
	//
	//  - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping.
	//
	//  - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/<project-name>). If there are multiple projects, this will point to the directory of the first one.
	// +optional
	WorkingDir string `json:"workingDir,omitempty"`
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// Optional list of environment variables that have to be set
	// before running the command
	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
	// +optional
	// Whether the command is capable to reload itself when source code changes.
	// If set to `true` the command won't be restarted and it is expected to handle file changes on its own.
	//
	// Default value is `false`
	HotReloadCapable bool `json:"hotReloadCapable,omitempty"`
}

// ApplyCommand applies a given component definition, typically bound to a
// devworkspace event (see CommandUnion.Apply for details).
type ApplyCommand struct {
	LabeledCommand `json:",inline"`
	// Describes component that will be applied
	//
	Component string `json:"component"`
}

// CompositeCommand executes several sub-commands, either sequentially or
// concurrently.
type CompositeCommand struct {
	LabeledCommand `json:",inline"`
	// The commands that comprise this composite command
	Commands []string `json:"commands,omitempty" patchStrategy:"replace"`
	// Indicates if the sub-commands should be executed concurrently
	// +optional
	Parallel bool `json:"parallel,omitempty"`
}

// CustomCommand is a command whose logic is implementation-dependant and is
// processed by a dedicated plugin identified by CommandClass.
type CustomCommand struct {
	LabeledCommand `json:",inline"`
	// Class of command that the associated implementation component
	// should use to process this command with the appropriate logic
	CommandClass string `json:"commandClass"`
	// Additional free-form configuration for this custom command
	// that the implementation component will know how to use
	//
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:EmbeddedResource
	EmbeddedResource runtime.RawExtension `json:"embeddedResource"`
}

View File

@ -0,0 +1,93 @@
package v1alpha2
// Component that allows the developer to add a configured container into their devworkspace
type ContainerComponent struct {
	BaseComponent `json:",inline"`
	Container     `json:",inline"`
	Endpoints []Endpoint `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
}

// Container describes the runtime configuration of a ContainerComponent:
// image, environment, mounts, resource limits and startup command.
type Container struct {
	// Container image to run.
	Image string `json:"image"`
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// Environment variables used in this container.
	//
	// The following variables are reserved and cannot be overridden via env:
	//
	//  - `$PROJECTS_ROOT`
	//
	//  - `$PROJECT_SOURCE`
	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
	// +optional
	// List of volumes mounts that should be mounted in this container.
	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
	// +optional
	MemoryLimit string `json:"memoryLimit,omitempty"`
	// +optional
	MemoryRequest string `json:"memoryRequest,omitempty"`
	// +optional
	CpuLimit string `json:"cpuLimit,omitempty"`
	// +optional
	CpuRequest string `json:"cpuRequest,omitempty"`
	// The command to run in the dockerimage component instead of the default one provided in the image.
	//
	// Defaults to an empty array, meaning use whatever is defined in the image.
	// +optional
	Command []string `json:"command,omitempty" patchStrategy:"replace"`
	// The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command.
	//
	// Defaults to an empty array, meaning use whatever is defined in the image.
	// +optional
	Args []string `json:"args,omitempty" patchStrategy:"replace"`
	// Toggles whether or not the project source code should
	// be mounted in the component.
	//
	// Defaults to true for all component types except plugins and components that set `dedicatedPod` to true.
	// +optional
	MountSources *bool `json:"mountSources,omitempty"`
	// Optional specification of the path in the container where
	// project sources should be transferred/mounted when `mountSources` is `true`.
	// When omitted, the default value of /projects is used.
	// +optional
	// +kubebuilder:default=/projects
	SourceMapping string `json:"sourceMapping,omitempty"`
	// Specify if a container should run in its own separated pod,
	// instead of running as part of the main development environment pod.
	//
	// Default value is `false`
	// +optional
	DedicatedPod bool `json:"dedicatedPod,omitempty"`
}

// EnvVar is a simple name/value environment variable pair.
type EnvVar struct {
	Name  string `json:"name" yaml:"name"`
	Value string `json:"value" yaml:"value"`
}

// Volume that should be mounted to a component container
type VolumeMount struct {
	// The volume mount name is the name of an existing `Volume` component.
	// If several containers mount the same volume name
	// then they will reuse the same volume and will be able to access to the same files.
	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
	// +kubebuilder:validation:MaxLength=63
	Name string `json:"name"`
	// The path in the component container where the volume should be mounted.
	// If no path is mentioned, the default path is `/<name>`.
	// +optional
	Path string `json:"path,omitempty"`
}

View File

@ -0,0 +1,45 @@
package v1alpha2
// K8sLikeComponentLocationType describes the type of
// the location the configuration is fetched from.
// Only one of the following component type may be specified.
// +kubebuilder:validation:Enum=Uri;Inlined
type K8sLikeComponentLocationType string

// The supported manifest location types.
const (
	UriK8sLikeComponentLocationType     K8sLikeComponentLocationType = "Uri"
	InlinedK8sLikeComponentLocationType K8sLikeComponentLocationType = "Inlined"
)

// K8sLikeComponentLocation is a union describing where the Kubernetes-like
// manifest comes from: a URI or an inlined string.
// +union
type K8sLikeComponentLocation struct {
	// Type of Kubernetes-like location
	// +
	// +unionDiscriminator
	// +optional
	LocationType K8sLikeComponentLocationType `json:"locationType,omitempty"`
	// Location in a file fetched from a uri.
	// +optional
	Uri string `json:"uri,omitempty"`
	// Inlined manifest
	// +optional
	Inlined string `json:"inlined,omitempty"`
}

// K8sLikeComponent is the structure shared by the Kubernetes and Openshift
// component variants: a manifest location plus optional endpoints.
type K8sLikeComponent struct {
	BaseComponent            `json:",inline"`
	K8sLikeComponentLocation `json:",inline"`
	Endpoints []Endpoint `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
}

// Component that allows partly importing Kubernetes resources into the devworkspace POD
type KubernetesComponent struct {
	K8sLikeComponent `json:",inline"`
}

// Component that allows partly importing Openshift resources into the devworkspace POD
type OpenshiftComponent struct {
	K8sLikeComponent `json:",inline"`
}

View File

@ -0,0 +1,7 @@
package v1alpha2
// PluginComponent allows importing a plugin: a devfile (referenced by uri, id
// or a DevWorkspaceTemplate custom resource) together with the overrides to
// apply to it.
type PluginComponent struct {
	BaseComponent   `json:",inline"`
	ImportReference `json:",inline"`
	PluginOverrides `json:",inline"`
}

View File

@ -0,0 +1,19 @@
package v1alpha2
// Component that allows the developer to declare and configure a volume into their devworkspace
type VolumeComponent struct {
	BaseComponent `json:",inline"`
	Volume        `json:",inline"`
}

// Volume that should be mounted to a component container
type Volume struct {
	// +optional
	// Size of the volume
	Size string `json:"size,omitempty"`
	// +optional
	// Ephemeral volumes are not stored persistently across restarts. Defaults
	// to false
	Ephemeral bool `json:"ephemeral,omitempty"`
}

View File

@ -0,0 +1,104 @@
package v1alpha2
import (
attributes "github.com/devfile/api/v2/pkg/attributes"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// ComponentType describes the type of component.
// Only one of the following component type may be specified.
// +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Plugin;Custom
type ComponentType string

// The component types supported by the ComponentUnion discriminator.
const (
	ContainerComponentType  ComponentType = "Container"
	KubernetesComponentType ComponentType = "Kubernetes"
	OpenshiftComponentType  ComponentType = "Openshift"
	PluginComponentType     ComponentType = "Plugin"
	VolumeComponentType     ComponentType = "Volume"
	CustomComponentType     ComponentType = "Custom"
)

// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context
// to the devworkspace, in order to make working in it easier.
type BaseComponent struct {
}

// Component is a single named devworkspace component, whose concrete kind is
// given by the inlined ComponentUnion.
//+k8s:openapi-gen=true
type Component struct {
	// Mandatory name that allows referencing the component
	// from other elements (such as commands) or from an external
	// devfile that may reference this component through a parent or a plugin.
	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
	// +kubebuilder:validation:MaxLength=63
	Name string `json:"name"`
	// Map of implementation-dependant free-form YAML attributes.
	// +optional
	// +kubebuilder:validation:Type=object
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Attributes attributes.Attributes `json:"attributes,omitempty"`
	ComponentUnion `json:",inline"`
}
// ComponentUnion is the discriminated union of all supported component kinds;
// exactly one member is expected to be set, matching ComponentType.
// +union
type ComponentUnion struct {
	// Type of component
	//
	// +unionDiscriminator
	// +optional
	ComponentType ComponentType `json:"componentType,omitempty"`
	// Allows adding and configuring devworkspace-related containers
	// +optional
	Container *ContainerComponent `json:"container,omitempty"`
	// Allows importing into the devworkspace the Kubernetes resources
	// defined in a given manifest. For example this allows reusing the Kubernetes
	// definitions used to deploy some runtime components in production.
	//
	// +optional
	Kubernetes *KubernetesComponent `json:"kubernetes,omitempty"`
	// Allows importing into the devworkspace the OpenShift resources
	// defined in a given manifest. For example this allows reusing the OpenShift
	// definitions used to deploy some runtime components in production.
	//
	// +optional
	Openshift *OpenshiftComponent `json:"openshift,omitempty"`
	// Allows specifying the definition of a volume
	// shared by several other components
	// +optional
	Volume *VolumeComponent `json:"volume,omitempty"`
	// Allows importing a plugin.
	//
	// Plugins are mainly imported devfiles that contribute components, commands
	// and events as a consistent single unit. They are defined in either YAML files
	// following the devfile syntax,
	// or as `DevWorkspaceTemplate` Kubernetes Custom Resources
	// +optional
	// +devfile:overrides:include:omitInPlugin=true
	Plugin *PluginComponent `json:"plugin,omitempty"`
	// Custom component whose logic is implementation-dependant
	// and should be provided by the user
	// possibly through some dedicated controller
	// +optional
	// +devfile:overrides:include:omit=true
	Custom *CustomComponent `json:"custom,omitempty"`
}

// CustomComponent is a component whose logic is implementation-dependant and
// is processed by a dedicated controller identified by ComponentClass.
type CustomComponent struct {
	// Class of component that the associated implementation controller
	// should use to process this command with the appropriate logic
	ComponentClass string `json:"componentClass"`
	// Additional free-form configuration for this custom component
	// that the implementation controller will know how to use
	//
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:EmbeddedResource
	EmbeddedResource runtime.RawExtension `json:"embeddedResource"`
}

View File

@ -0,0 +1,14 @@
package v1alpha2
import (
"github.com/devfile/api/v2/pkg/devfile"
)
// Devfile describes the structure of a cloud-native devworkspace and development environment.
// It inlines the devfile header (schema version and metadata) together with
// the devworkspace template specification.
// +k8s:deepcopy-gen=false
// +devfile:jsonschema:generate:omitCustomUnionMembers=true,omitPluginUnionMembers=true
type Devfile struct {
	devfile.DevfileHeader `json:",inline"`

	DevWorkspaceTemplateSpec `json:",inline"`
}

View File

@ -0,0 +1,4 @@
package v1alpha2
// Hub marks this type as a conversion hub: other DevWorkspace API versions
// are converted to and from this version (controller-runtime conversion.Hub).
func (*DevWorkspace) Hub() {}

View File

@ -0,0 +1,97 @@
package v1alpha2
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// DevWorkspaceSpec defines the desired state of DevWorkspace
type DevWorkspaceSpec struct {
	// Whether the devworkspace should be running.
	// NOTE(review): presumably toggling this starts/stops the workspace — confirm in the controller.
	Started bool `json:"started"`
	// Routing class of the devworkspace.
	// NOTE(review): appears to select the routing/exposure strategy for endpoints — confirm.
	RoutingClass string `json:"routingClass,omitempty"`
	// Template describing the structure of the devworkspace.
	Template DevWorkspaceTemplateSpec `json:"template,omitempty"`
}

// DevWorkspaceStatus defines the observed state of DevWorkspace
type DevWorkspaceStatus struct {
	// Id of the DevWorkspace
	DevWorkspaceId string `json:"devworkspaceId"`
	// Main URL for this DevWorkspace
	MainUrl string `json:"mainUrl,omitempty"`
	Phase   DevWorkspacePhase `json:"phase,omitempty"`
	// Conditions represent the latest available observations of an object's state
	Conditions []DevWorkspaceCondition `json:"conditions,omitempty"`
	// Message is a short user-readable message giving additional information
	// about an object's state
	Message string `json:"message,omitempty"`
}

// DevWorkspacePhase is the coarse-grained lifecycle phase of a devworkspace.
type DevWorkspacePhase string

// Valid devworkspace Statuses
const (
	DevWorkspaceStatusStarting DevWorkspacePhase = "Starting"
	DevWorkspaceStatusRunning  DevWorkspacePhase = "Running"
	DevWorkspaceStatusStopped  DevWorkspacePhase = "Stopped"
	DevWorkspaceStatusStopping DevWorkspacePhase = "Stopping"
	DevWorkspaceStatusFailed   DevWorkspacePhase = "Failed"
	DevWorkspaceStatusError    DevWorkspacePhase = "Error"
)

// DevWorkspaceCondition contains details for the current condition of this devworkspace.
type DevWorkspaceCondition struct {
	// Type is the type of the condition.
	Type DevWorkspaceConditionType `json:"type"`
	// Status is the status of the condition.
	// Can be True, False, Unknown.
	Status corev1.ConditionStatus `json:"status"`
	// Last time the condition transitioned from one status to another.
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
	// Unique, one-word, CamelCase reason for the condition's last transition.
	Reason string `json:"reason,omitempty"`
	// Human-readable message indicating details about last transition.
	Message string `json:"message,omitempty"`
}

// Types of conditions reported by devworkspace
type DevWorkspaceConditionType string

// The condition types reported in DevWorkspaceStatus.Conditions.
const (
	DevWorkspaceComponentsReady     DevWorkspaceConditionType = "ComponentsReady"
	DevWorkspaceRoutingReady        DevWorkspaceConditionType = "RoutingReady"
	DevWorkspaceServiceAccountReady DevWorkspaceConditionType = "ServiceAccountReady"
	DevWorkspaceReady               DevWorkspaceConditionType = "Ready"
	DevWorkspaceFailedStart         DevWorkspaceConditionType = "FailedStart"
	DevWorkspaceError               DevWorkspaceConditionType = "Error"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DevWorkspace is the Schema for the devworkspaces API
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=devworkspaces,scope=Namespaced,shortName=dw
// +kubebuilder:printcolumn:name="DevWorkspace ID",type="string",JSONPath=".status.devworkspaceId",description="The devworkspace's unique id"
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="The current devworkspace startup phase"
// +kubebuilder:printcolumn:name="Info",type="string",JSONPath=".status.message",description="Additional information about the devworkspace"
// +devfile:jsonschema:generate
// +kubebuilder:storageversion
type DevWorkspace struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   DevWorkspaceSpec   `json:"spec,omitempty"`
	Status DevWorkspaceStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DevWorkspaceList contains a list of DevWorkspace
type DevWorkspaceList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []DevWorkspace `json:"items"`
}

// init registers the DevWorkspace types with the scheme builder so they can
// be recognized by clients using the shared scheme.
func init() {
	SchemeBuilder.Register(&DevWorkspace{}, &DevWorkspaceList{})
}

View File

@ -0,0 +1,4 @@
package v1alpha2
// Hub marks this type as a conversion hub: other DevWorkspaceTemplate API
// versions are converted to and from this version (controller-runtime conversion.Hub).
func (*DevWorkspaceTemplate) Hub() {}

View File

@ -0,0 +1,79 @@
package v1alpha2
import attributes "github.com/devfile/api/v2/pkg/attributes"
// Structure of the devworkspace. This is also the specification of a devworkspace template.
// +devfile:jsonschema:generate
type DevWorkspaceTemplateSpec struct {
	// Parent devworkspace template
	// +optional
	Parent *Parent `json:"parent,omitempty"`

	DevWorkspaceTemplateSpecContent `json:",inline"`
}

// DevWorkspaceTemplateSpecContent holds the devfile top-level lists
// (components, projects, starter projects, commands) plus variables,
// attributes and event bindings.
// +devfile:overrides:generate
type DevWorkspaceTemplateSpecContent struct {
	// Map of key-value variables used for string replacement in the devfile. Values can be referenced via {{variable-key}}
	// to replace the corresponding value in string fields in the devfile. Replacement cannot be used for
	//
	//  - schemaVersion, metadata, parent source
	//
	//  - element identifiers, e.g. command id, component name, endpoint name, project name
	//
	//  - references to identifiers, e.g. in events, a command's component, container's volume mount name
	//
	//  - string enums, e.g. command group kind, endpoint exposure
	// +optional
	// +patchStrategy=merge
	// +devfile:overrides:include:omitInPlugin=true,description=Overrides of variables encapsulated in a parent devfile.
	Variables map[string]string `json:"variables,omitempty" patchStrategy:"merge"`
	// Map of implementation-dependant free-form YAML attributes.
	// +optional
	// +patchStrategy=merge
	// +devfile:overrides:include:omitInPlugin=true,description=Overrides of attributes encapsulated in a parent devfile.
	// +kubebuilder:validation:Type=object
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Attributes attributes.Attributes `json:"attributes,omitempty" patchStrategy:"merge"`
	// List of the devworkspace components, such as editor and plugins,
	// user-provided containers, or other types of components
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +devfile:overrides:include:description=Overrides of components encapsulated in a parent devfile or a plugin.
	// +devfile:toplevellist
	Components []Component `json:"components,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
	// Projects worked on in the devworkspace, containing names and sources locations
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +devfile:overrides:include:omitInPlugin=true,description=Overrides of projects encapsulated in a parent devfile.
	// +devfile:toplevellist
	Projects []Project `json:"projects,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
	// StarterProjects is a project that can be used as a starting point when bootstrapping new projects
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +devfile:overrides:include:omitInPlugin=true,description=Overrides of starterProjects encapsulated in a parent devfile.
	// +devfile:toplevellist
	StarterProjects []StarterProject `json:"starterProjects,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
	// Predefined, ready-to-use, devworkspace-related commands
	// +optional
	// +patchMergeKey=id
	// +patchStrategy=merge
	// +devfile:overrides:include:description=Overrides of commands encapsulated in a parent devfile or a plugin.
	// +devfile:toplevellist
	Commands []Command `json:"commands,omitempty" patchStrategy:"merge" patchMergeKey:"id"`
	// Bindings of commands to events.
	// Each command is referred-to by its name.
	// +optional
	// +devfile:overrides:include:omit=true
	Events *Events `json:"events,omitempty"`
}

View File

@ -0,0 +1,31 @@
package v1alpha2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DevWorkspaceTemplate is the Schema for the devworkspacetemplates API
// +kubebuilder:resource:path=devworkspacetemplates,scope=Namespaced,shortName=dwt
// +devfile:jsonschema:generate
// +kubebuilder:storageversion
type DevWorkspaceTemplate struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec DevWorkspaceTemplateSpec `json:"spec,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DevWorkspaceTemplateList contains a list of DevWorkspaceTemplate
type DevWorkspaceTemplateList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []DevWorkspaceTemplate `json:"items"`
}

// init registers the DevWorkspaceTemplate types with the scheme builder so
// they can be recognized by clients using the shared scheme.
func init() {
	SchemeBuilder.Register(&DevWorkspaceTemplate{}, &DevWorkspaceTemplateList{})
}

View File

@ -0,0 +1,6 @@
// Package v1alpha2 contains API Schema definitions for the org v1alpha2 API group
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true
// +groupName=workspace.devfile.io
// +devfile:jsonschema:version=2.2.0-alpha
package v1alpha2

View File

@ -0,0 +1,115 @@
package v1alpha2
import (
attributes "github.com/devfile/api/v2/pkg/attributes"
)
// EndpointProtocol defines the application and transport protocols of the traffic that will go through this endpoint.
// Only one of the following protocols may be specified: http, ws, tcp, udp.
// +kubebuilder:validation:Enum=http;https;ws;wss;tcp;udp
type EndpointProtocol string

// The supported endpoint protocols.
const (
	// Endpoint will have `http` traffic, typically on a TCP connection.
	// It will be automatically promoted to `https` when the `secure` field is set to `true`
	HTTPEndpointProtocol EndpointProtocol = "http"
	// Endpoint will have `https` traffic, typically on a TCP connection
	HTTPSEndpointProtocol EndpointProtocol = "https"
	// Endpoint will have `ws` traffic, typically on a TCP connection
	// It will be automatically promoted to `wss` when the `secure` field is set to `true`
	WSEndpointProtocol EndpointProtocol = "ws"
	// Endpoint will have `wss` traffic, typically on a TCP connection
	WSSEndpointProtocol EndpointProtocol = "wss"
	// Endpoint will have traffic on a TCP connection,
	// without specifying an application protocol
	TCPEndpointProtocol EndpointProtocol = "tcp"
	// Endpoint will have traffic on an UDP connection,
	// without specifying an application protocol
	UDPEndpointProtocol EndpointProtocol = "udp"
)

// EndpointExposure describes the way an endpoint is exposed on the network.
// Only one of the following exposures may be specified: public, internal, none.
// +kubebuilder:validation:Enum=public;internal;none
type EndpointExposure string

// The supported endpoint exposures.
const (
	// Endpoint will be exposed on the public network, typically through
	// a K8S ingress or an OpenShift route
	PublicEndpointExposure EndpointExposure = "public"
	// Endpoint will be exposed internally outside of the main devworkspace POD,
	// typically by K8S services, to be consumed by other elements running
	// on the same cloud internal network.
	InternalEndpointExposure EndpointExposure = "internal"
	// Endpoint will not be exposed and will only be accessible
	// inside the main devworkspace POD, on a local address.
	NoneEndpointExposure EndpointExposure = "none"
)
// Endpoint describes a network port exposed by a component and how it should
// be published (exposure, protocol, security, path).
type Endpoint struct {
	// Name that uniquely identifies the endpoint within the devworkspace.
	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
	// +kubebuilder:validation:MaxLength=63
	Name string `json:"name"`

	// Port number the endpoint targets.
	// NOTE(review): presumably the container port the traffic is forwarded to — confirm with the routing controller.
	TargetPort int `json:"targetPort"`

	// Describes how the endpoint should be exposed on the network.
	//
	//  - `public` means that the endpoint will be exposed on the public network, typically through
	//  a K8S ingress or an OpenShift route.
	//
	//  - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD,
	//  typically by K8S services, to be consumed by other elements running
	//  on the same cloud internal network.
	//
	//  - `none` means that the endpoint will not be exposed and will only be accessible
	//  inside the main devworkspace POD, on a local address.
	//
	// Default value is `public`
	// +optional
	// +kubebuilder:default=public
	Exposure EndpointExposure `json:"exposure,omitempty"`

	// Describes the application and transport protocols of the traffic that will go through this endpoint.
	//
	//  - `http`: Endpoint will have `http` traffic, typically on a TCP connection.
	//  It will be automatically promoted to `https` when the `secure` field is set to `true`.
	//
	//  - `https`: Endpoint will have `https` traffic, typically on a TCP connection.
	//
	//  - `ws`: Endpoint will have `ws` traffic, typically on a TCP connection.
	//  It will be automatically promoted to `wss` when the `secure` field is set to `true`.
	//
	//  - `wss`: Endpoint will have `wss` traffic, typically on a TCP connection.
	//
	//  - `tcp`: Endpoint will have traffic on a TCP connection, without specifying an application protocol.
	//
	//  - `udp`: Endpoint will have traffic on an UDP connection, without specifying an application protocol.
	//
	// Default value is `http`
	// +optional
	// +kubebuilder:default=http
	Protocol EndpointProtocol `json:"protocol,omitempty"`

	// Describes whether the endpoint should be secured and protected by some
	// authentication process. This requires a protocol of `https` or `wss`.
	// +optional
	Secure bool `json:"secure,omitempty"`

	// Path of the endpoint URL
	// +optional
	Path string `json:"path,omitempty"`

	// Map of implementation-dependant string-based free-form attributes.
	//
	// Examples of Che-specific attributes:
	//
	//  - cookiesAuthEnabled: "true" / "false",
	//
	//  - type: "terminal" / "ide" / "ide-dev",
	// +optional
	// +kubebuilder:validation:Type=object
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Attributes attributes.Attributes `json:"attributes,omitempty"`
}

View File

@ -0,0 +1,26 @@
package v1alpha2
// Events binds command ids to devworkspace lifecycle events.
type Events struct {
	DevWorkspaceEvents `json:",inline"`
}

// DevWorkspaceEvents lists the command ids to run at each lifecycle stage
// (pre/post start, pre/post stop).
type DevWorkspaceEvents struct {
	// IDs of commands that should be executed before the devworkspace start.
	// Kubernetes-wise, these commands would typically be executed in init containers of the devworkspace POD.
	// +optional
	PreStart []string `json:"preStart,omitempty"`
	// IDs of commands that should be executed after the devworkspace is completely started.
	// In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning.
	// This means that those commands are not triggered until the user opens the IDE in their browser.
	// +optional
	PostStart []string `json:"postStart,omitempty"`
	// +optional
	// IDs of commands that should be executed before stopping the devworkspace.
	PreStop []string `json:"preStop,omitempty"`
	// +optional
	// IDs of commands that should be executed after stopping the devworkspace.
	PostStop []string `json:"postStop,omitempty"`
}

View File

@ -0,0 +1,53 @@
package v1alpha2
// ImportReferenceType describes the type of location
// from where the referenced template structure should be retrieved.
// Only one of the following parent locations may be specified.
// +kubebuilder:validation:Enum=Uri;Id;Kubernetes
type ImportReferenceType string

// The supported import reference location types.
const (
	UriImportReferenceType        ImportReferenceType = "Uri"
	IdImportReferenceType         ImportReferenceType = "Id"
	KubernetesImportReferenceType ImportReferenceType = "Kubernetes"
)

// Location from where an import reference is retrieved
// +union
type ImportReferenceUnion struct {
	// type of location from where the referenced template structure should be retrieved
	// +
	// +unionDiscriminator
	// +optional
	ImportReferenceType ImportReferenceType `json:"importReferenceType,omitempty"`
	// URI Reference of a parent devfile YAML file.
	// It can be a full URL or a relative URI with the current devfile as the base URI.
	// +optional
	Uri string `json:"uri,omitempty"`
	// Id in a registry that contains a Devfile yaml file
	// +optional
	Id string `json:"id,omitempty"`
	// Reference to a Kubernetes CRD of type DevWorkspaceTemplate
	// +optional
	Kubernetes *KubernetesCustomResourceImportReference `json:"kubernetes,omitempty"`
}

// KubernetesCustomResourceImportReference points to a Kubernetes custom
// resource by name and (optionally) namespace.
type KubernetesCustomResourceImportReference struct {
	Name string `json:"name"`
	// +optional
	Namespace string `json:"namespace,omitempty"`
}

// ImportReference is an ImportReferenceUnion plus the registry URL needed to
// resolve id-based references.
type ImportReference struct {
	ImportReferenceUnion `json:",inline"`
	// Registry URL to pull the parent devfile from when using id in the parent reference.
	// To ensure the parent devfile gets resolved consistently in different environments,
	// it is recommended to always specify the `registryUrl` when `Id` is used.
	// +optional
	RegistryUrl string `json:"registryUrl,omitempty"`
}

View File

@ -0,0 +1,51 @@
package v1alpha2
// Keyed is expected to be implemented by the elements of the devfile top-level lists
// (such as Command, Component, Project, ...).
//
// The Keys of list objects will typically be used to merge the top-level lists
// according to strategic merge patch rules, during parent or plugin overriding.
// +k8s:deepcopy-gen=false
type Keyed interface {
	// Key is a string that allows uniquely identifying the object,
	// especially in the Devfile top-level lists that are map-like K8S-compatible lists.
	Key() string
}

// KeyedList is a list of objects that are uniquely identified by a Key
// The devfile top-level list (such as Commands, Components, Projects, ...)
// are examples of such lists of Keyed objects
// +k8s:deepcopy-gen=false
type KeyedList []Keyed
// GetKeys converts a KeyedList into a slice of string by calling Key() on each
// element in the list.
//
// A nil or empty list yields a nil slice, preserving the previous behavior
// (relevant for JSON encoding and nil comparisons).
func (l KeyedList) GetKeys() []string {
	if len(l) == 0 {
		return nil
	}
	// Pre-size the result to avoid repeated append growth.
	res := make([]string, 0, len(l))
	for _, keyed := range l {
		res = append(res, keyed.Key())
	}
	return res
}
// TopLevelLists is a map that contains several Devfile top-level lists
// (such as `Commands`, `Components`, `Projects`, ...), available as `KeyedList`s.
//
// Each key of this map is the name of the field that contains the given top-level list:
// `Commands`, `Components`, etc...
// +k8s:deepcopy-gen=false
type TopLevelLists map[string]KeyedList

// TopLevelListContainer is an interface that allows retrieving the devfile top-level lists
// from an object.
// Main implementor of this interface will be the `DevWorkspaceTemplateSpecContent`, which
// will return all its devfile top-level lists.
//
// But this will also be implemented by `Overrides` which may return fewer top-level lists
// than the `DevWorkspaceTemplateSpecContent`, according to the top-level lists they can override.
// `PluginOverride` will not return the `Projects` and `StarterProjects` list, since plugins are
// not expected to override `projects` or `starterProjects`.
// +k8s:deepcopy-gen=false
type TopLevelListContainer interface {
	GetToplevelLists() TopLevelLists
}

View File

@ -0,0 +1,41 @@
package v1alpha2
import (
"fmt"
"reflect"
)
// extractKeys collects, as a []Keyed, the elements of the given slice that
// implement the Keyed interface. keyedList is expected to be a slice; reflect
// panics on Len/Index for other kinds.
func extractKeys(keyedList interface{}) []Keyed {
	list := reflect.ValueOf(keyedList)
	keys := make([]Keyed, 0, list.Len())
	for idx := 0; idx < list.Len(); idx++ {
		item := list.Index(idx)
		if !item.CanInterface() {
			continue
		}
		// Silently skip elements that do not implement Keyed.
		if keyed, ok := item.Interface().(Keyed); ok {
			keys = append(keys, keyed)
		}
	}
	return keys
}
// CheckDuplicateKeys checks if duplicate keys are present in the devfile objects
// of the given slice. Elements that do not implement Keyed are ignored.
// The first duplicate found is reported as an error; nil means all keys are unique.
func CheckDuplicateKeys(keyedList interface{}) error {
	list := reflect.ValueOf(keyedList)
	// Set of keys already encountered; struct{} values carry no storage cost.
	seen := make(map[string]struct{}, list.Len())
	for idx := 0; idx < list.Len(); idx++ {
		item := list.Index(idx)
		if !item.CanInterface() {
			continue
		}
		keyed, ok := item.Interface().(Keyed)
		if !ok {
			continue
		}
		key := keyed.Key()
		if _, dup := seen[key]; dup {
			return fmt.Errorf("duplicate key: %s", key)
		}
		seen[key] = struct{}{}
	}
	return nil
}

View File

@ -0,0 +1,58 @@
package v1alpha2
// +kubebuilder:validation:Enum=replace;delete
type OverridingPatchDirective string

const (
	// ReplaceOverridingDirective indicates the matched element should be
	// replaced instead of being merged.
	ReplaceOverridingDirective OverridingPatchDirective = "replace"
	// DeleteOverridingDirective indicates the matched element should be deleted.
	DeleteOverridingDirective OverridingPatchDirective = "delete"
)

const (
	// NOTE(review): this is defined with the same value as
	// ReplaceOverridingDirective ("replace"); a dedicated
	// `$deleteFromPrimitiveList` value would be expected here — confirm
	// whether this is intentional.
	DeleteFromPrimitiveListOverridingPatchDirective OverridingPatchDirective = "replace"
)

// OverrideDirective describes a strategic-merge-patch directive to apply at a
// given path of the overridden devfile content.
type OverrideDirective struct {
	// Path of the element the directive should be applied on
	//
	// For the following path tree:
	//
	// ```yaml
	// commands:
	//   - exec
	//       id: commandId
	// ```
	//
	// the path would be: `commands["commandId"]`.
	Path string `json:"path"`

	// `$Patch` directive as defined in
	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#basic-patch-format
	//
	// This is an enumeration that allows the following values:
	//
	// - *replace*: indicates that the element matched by the `jsonPath` field should be replaced instead of being merged.
	//
	// - *delete*: indicates that the element matched by the `jsonPath` field should be deleted.
	//
	// +optional
	Patch OverridingPatchDirective `json:"patch,omitempty"`

	// `DeleteFromPrimitiveList` directive as defined in
	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#deletefromprimitivelist-directive
	//
	// This indicates that the elements in this list should be deleted from the original primitive list.
	// The original primitive list is the element matched by the `jsonPath` field.
	// +optional
	DeleteFromPrimitiveList []string `json:"deleteFromPrimitiveList,omitempty"`

	// `SetElementOrder` directive as defined in
	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#setelementorder-directive
	//
	// This provides a way to specify the order of a list. The relative order specified in this directive will be retained.
	// The list whose order is controlled is the element matched by the `jsonPath` field.
	// If the controlled list is a list of objects, then the values in this list should be
	// the merge keys of the objects to order.
	// +optional
	SetElementOrder []string `json:"setElementOrder,omitempty"`
}

View File

@ -0,0 +1,11 @@
package v1alpha2
// Overrides is implemented by the override containers (such as ParentOverrides
// and PluginOverrides) that can be applied on top of devfile content.
// +k8s:deepcopy-gen=false
type Overrides interface {
	TopLevelListContainer
	// isOverride is an unexported marker method restricting implementations
	// to this package (and its generators).
	isOverride()
}

// OverridesBase is used in the Overrides generator in order to provide a common base for the generated Overrides
// So please be careful when renaming
type OverridesBase struct{}

View File

@ -0,0 +1,6 @@
package v1alpha2
// Parent describes the parent devfile this devfile inherits from: the import
// reference (where to fetch it) combined with the overrides to apply on top.
type Parent struct {
	ImportReference `json:",inline"`
	ParentOverrides `json:",inline"`
}

View File

@ -0,0 +1,128 @@
package v1alpha2
import (
attributes "github.com/devfile/api/v2/pkg/attributes"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// Project describes a source-code project to be cloned into the devworkspace.
type Project struct {
	// Project name
	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
	// +kubebuilder:validation:MaxLength=63
	Name string `json:"name"`

	// Map of implementation-dependant free-form YAML attributes.
	// +optional
	// +kubebuilder:validation:Type=object
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Attributes attributes.Attributes `json:"attributes,omitempty"`

	// Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.
	// +optional
	ClonePath string `json:"clonePath,omitempty"`

	ProjectSource `json:",inline"`
}

// StarterProject describes a sample project that can be used to bootstrap a
// new project in the devworkspace.
type StarterProject struct {
	// Project name
	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
	// +kubebuilder:validation:MaxLength=63
	Name string `json:"name"`

	// Map of implementation-dependant free-form YAML attributes.
	// +optional
	// +kubebuilder:validation:Type=object
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Attributes attributes.Attributes `json:"attributes,omitempty"`

	// Description of a starter project
	// +optional
	Description string `json:"description,omitempty"`

	// Sub-directory from a starter project to be used as root for starter project.
	// +optional
	SubDir string `json:"subDir,omitempty"`

	ProjectSource `json:",inline"`
}
// ProjectSourceType describes the type of Project sources.
// Only one of the following project sources may be specified.
// +kubebuilder:validation:Enum=Git;Zip;Custom
type ProjectSourceType string

const (
	GitProjectSourceType    ProjectSourceType = "Git"
	ZipProjectSourceType    ProjectSourceType = "Zip"
	CustomProjectSourceType ProjectSourceType = "Custom"
)

// ProjectSource is a K8S-style union: at most one of the source fields is
// expected to be set, with SourceType as the discriminator.
// +union
type ProjectSource struct {
	// Type of project source
	// +
	// +unionDiscriminator
	// +optional
	SourceType ProjectSourceType `json:"sourceType,omitempty"`

	// Project's Git source
	// +optional
	Git *GitProjectSource `json:"git,omitempty"`

	// Project's Zip source
	// +optional
	Zip *ZipProjectSource `json:"zip,omitempty"`

	// Project's Custom source
	// +optional
	// +devfile:overrides:include:omit=true
	Custom *CustomProjectSource `json:"custom,omitempty"`
}
// CommonProjectSource holds fields shared by all project source types.
// Currently empty; kept as an extension point.
type CommonProjectSource struct {
}

// CustomProjectSource is a non-standard project source handled by the
// controller identified by ProjectSourceClass.
type CustomProjectSource struct {
	// Class of project source this custom source is handled by.
	ProjectSourceClass string `json:"projectSourceClass"`

	// Free-form configuration of the custom source, stored as an embedded
	// Kubernetes resource.
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:EmbeddedResource
	EmbeddedResource runtime.RawExtension `json:"embeddedResource"`
}

// ZipProjectSource describes a project fetched from a Zip archive.
type ZipProjectSource struct {
	CommonProjectSource `json:",inline"`

	// Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH
	// NOTE(review): marked +required while the json tag uses omitempty — confirm which is intended.
	// +required
	Location string `json:"location,omitempty"`
}

// GitLikeProjectSource holds the fields common to Git-style sources:
// the configured remotes and the checkout target.
type GitLikeProjectSource struct {
	CommonProjectSource `json:",inline"`

	// Defines from what the project should be checked out. Required if there are more than one remote configured
	// +optional
	CheckoutFrom *CheckoutFrom `json:"checkoutFrom,omitempty"`

	// The remotes map which should be initialized in the git project. Must have at least one remote configured
	Remotes map[string]string `json:"remotes"`
}

// CheckoutFrom designates the revision and remote to check a project out from.
type CheckoutFrom struct {
	// The revision to checkout from. Should be branch name, tag or commit id.
	// Default branch is used if missing or specified revision is not found.
	// +optional
	Revision string `json:"revision,omitempty"`

	// The name of the remote to use as the checkout source. Required if there are more than one remote configured
	// +optional
	Remote string `json:"remote,omitempty"`
}

// GitProjectSource describes a project hosted in a Git repository.
type GitProjectSource struct {
	GitLikeProjectSource `json:",inline"`
}

View File

@ -0,0 +1,22 @@
// NOTE: Boilerplate only. Ignore this file.

// Package v1alpha2 contains API Schema definitions for the org v1alpha2 API group
// +k8s:deepcopy-gen=package,register
// +groupName=workspace.devfile.io
package v1alpha2

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// SchemeGroupVersion is group version used to register these objects
	SchemeGroupVersion = schema.GroupVersion{Group: "workspace.devfile.io", Version: "v1alpha2"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

View File

@ -0,0 +1,21 @@
package v1alpha2
// +k8s:deepcopy-gen=false
// Union is an interface that allows managing structs defined as
// Kubernetes unions with discriminators, according to the following KEP:
// https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190325-unions.md
type Union interface {
	// discriminator returns a pointer to the union's discriminator field,
	// so helpers can read and rewrite it in place.
	discriminator() *string

	// Normalize allows normalizing the union, according to the following rules:
	// - When only one field of the union is set and no discriminator is set, set the discriminator according to the union value.
	// - When several fields are set and a discriminator is set, remove (== reset to zero value) all the values that do not match the discriminator.
	// - When only one union value is set and it matches discriminator, just do nothing.
	// - In other case, something is inconsistent or ambiguous: an error is thrown.
	Normalize() error

	// Simplify allows removing the union discriminator,
	// but only after normalizing it if necessary.
	Simplify()
}

View File

@ -0,0 +1,103 @@
package v1alpha2
import (
"errors"
"reflect"
)
// visitUnion finds the first set (non-zero) member of the given union struct
// and invokes the visitor callback declared for it.
//
// The visitor is a struct whose function-typed fields are named after the
// union's members. The result is nil when no member is set or the matching
// callback is nil; otherwise it is whatever the callback returns.
//
// NOTE(review): since the function returns as soon as the first set member is
// handled, the duplicate-member error below is unreachable as written; it is
// kept verbatim to preserve behavior and the error message.
func visitUnion(union interface{}, visitor interface{}) error {
	unionValue := reflect.ValueOf(union)
	visitorValue := reflect.ValueOf(visitor)
	visitorType := visitorValue.Type()

	memberFound := false
	for i := 0; i < visitorValue.NumField(); i++ {
		member := unionValue.FieldByName(visitorType.Field(i).Name)
		if member.IsZero() {
			continue
		}
		if memberFound {
			return errors.New("Only one element should be set in union: " + unionValue.Type().Name())
		}
		memberFound = true

		callback := visitorValue.Field(i)
		if callback.IsNil() {
			// No callback registered for this member: nothing to do.
			return nil
		}
		if result := callback.Call([]reflect.Value{member})[0]; !result.IsNil() {
			return result.Interface().(error)
		}
		return nil
	}
	return nil
}
// simplifyUnion clears the union discriminator after a best-effort
// normalization pass. The normalization error is intentionally discarded:
// the Union interface's Simplify() has no error to propagate.
func simplifyUnion(union Union, visitorType reflect.Type) {
	_ = normalizeUnion(union, visitorType) // best effort
	*union.discriminator() = ""
}
// normalizeUnion applies the union normalization rules: first deduce the
// discriminator from the single set member (when unset), then zero out every
// member that does not match the discriminator.
func normalizeUnion(union Union, visitorType reflect.Type) error {
	if err := updateDiscriminator(union, visitorType); err != nil {
		return err
	}
	return cleanupValues(union, visitorType)
}
// updateDiscriminator deduces the union discriminator from the single set
// (non-zero) member, when the discriminator is currently empty. It errors if
// the discriminator pointer is nil or if two members are set, since the
// discriminator would then be ambiguous.
func updateDiscriminator(union Union, visitorType reflect.Type) error {
	unionValue := reflect.ValueOf(union)
	disc := union.discriminator()
	if disc == nil {
		return errors.New("Discriminator should not be 'nil' in union: " + unionValue.Type().Name())
	}
	if *disc != "" {
		// Already set: nothing to deduce.
		return nil
	}
	memberFound := false
	for i := 0; i < visitorType.NumField(); i++ {
		name := visitorType.Field(i).Name
		if unionValue.Elem().FieldByName(name).IsZero() {
			continue
		}
		if memberFound {
			return errors.New("Discriminator cannot be deduced from 2 values in union: " + unionValue.Type().Name())
		}
		memberFound = true
		*disc = name
	}
	return nil
}
// cleanupValues resets to their zero value all union members that do not
// match the discriminator. It errors if the discriminator pointer is nil or
// the discriminator is empty, since there would be no way to decide which
// member to keep.
func cleanupValues(union Union, visitorType reflect.Type) error {
	unionValue := reflect.ValueOf(union)
	disc := union.discriminator()
	if disc == nil {
		return errors.New("Discriminator should not be 'nil' in union: " + unionValue.Type().Name())
	}
	if *disc == "" {
		return errors.New("Values cannot be cleaned up without a discriminator in union: " + unionValue.Type().Name())
	}
	for i := 0; i < visitorType.NumField(); i++ {
		name := visitorType.Field(i).Name
		member := unionValue.Elem().FieldByName(name)
		if name != *disc && !member.IsZero() {
			member.Set(reflect.Zero(member.Type()))
		}
	}
	return nil
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,49 @@
package v1alpha2
// The Key() implementations below make the devfile top-level list element
// types (and their generated parent/plugin override variants) satisfy the
// Keyed interface: components and projects are identified by Name, commands
// by Id.

func (keyed Component) Key() string {
	return keyed.Name
}

func (keyed Project) Key() string {
	return keyed.Name
}

func (keyed StarterProject) Key() string {
	return keyed.Name
}

func (keyed Command) Key() string {
	return keyed.Id
}

func (keyed ComponentParentOverride) Key() string {
	return keyed.Name
}

func (keyed ProjectParentOverride) Key() string {
	return keyed.Name
}

func (keyed StarterProjectParentOverride) Key() string {
	return keyed.Name
}

func (keyed CommandParentOverride) Key() string {
	return keyed.Id
}

func (keyed ComponentPluginOverrideParentOverride) Key() string {
	return keyed.Name
}

func (keyed CommandPluginOverrideParentOverride) Key() string {
	return keyed.Id
}

func (keyed ComponentPluginOverride) Key() string {
	return keyed.Name
}

func (keyed CommandPluginOverride) Key() string {
	return keyed.Id
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,470 @@
package v1alpha2
import (
attributes "github.com/devfile/api/v2/pkg/attributes"
)
// PluginOverrides describes the overridable content of a plugin: only
// components and commands can be overridden at the plugin level.
// +devfile:jsonschema:generate
type PluginOverrides struct {
	OverridesBase `json:",inline"`

	// Overrides of components encapsulated in a parent devfile or a plugin.
	// Overriding is done according to K8S strategic merge patch standard rules.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +devfile:toplevellist
	Components []ComponentPluginOverride `json:"components,omitempty" patchStrategy:"merge" patchMergeKey:"name"`

	// Overrides of commands encapsulated in a parent devfile or a plugin.
	// Overriding is done according to K8S strategic merge patch standard rules.
	// +optional
	// +patchMergeKey=id
	// +patchStrategy=merge
	// +devfile:toplevellist
	Commands []CommandPluginOverride `json:"commands,omitempty" patchStrategy:"merge" patchMergeKey:"id"`
}

// ComponentPluginOverride is the plugin-override variant of Component.
//+k8s:openapi-gen=true
type ComponentPluginOverride struct {
	// Mandatory name that allows referencing the component
	// from other elements (such as commands) or from an external
	// devfile that may reference this component through a parent or a plugin.
	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
	// +kubebuilder:validation:MaxLength=63
	Name string `json:"name"`

	// Map of implementation-dependant free-form YAML attributes.
	// +optional
	// +kubebuilder:validation:Type=object
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Attributes attributes.Attributes `json:"attributes,omitempty"`

	ComponentUnionPluginOverride `json:",inline"`
}

// CommandPluginOverride is the plugin-override variant of Command.
type CommandPluginOverride struct {
	// Mandatory identifier that allows referencing
	// this command in composite commands, from
	// a parent, or in events.
	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
	// +kubebuilder:validation:MaxLength=63
	Id string `json:"id"`

	// Map of implementation-dependant free-form YAML attributes.
	// +optional
	// +kubebuilder:validation:Type=object
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Attributes attributes.Attributes `json:"attributes,omitempty"`

	CommandUnionPluginOverride `json:",inline"`
}
// ComponentUnionPluginOverride is a K8S-style union of the component kinds a
// plugin override can configure, discriminated by ComponentType.
// +union
type ComponentUnionPluginOverride struct {
	// +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume
	// Type of component
	//
	// +unionDiscriminator
	// +optional
	ComponentType ComponentTypePluginOverride `json:"componentType,omitempty"`

	// Allows adding and configuring devworkspace-related containers
	// +optional
	Container *ContainerComponentPluginOverride `json:"container,omitempty"`

	// Allows importing into the devworkspace the Kubernetes resources
	// defined in a given manifest. For example this allows reusing the Kubernetes
	// definitions used to deploy some runtime components in production.
	//
	// +optional
	Kubernetes *KubernetesComponentPluginOverride `json:"kubernetes,omitempty"`

	// Allows importing into the devworkspace the OpenShift resources
	// defined in a given manifest. For example this allows reusing the OpenShift
	// definitions used to deploy some runtime components in production.
	//
	// +optional
	Openshift *OpenshiftComponentPluginOverride `json:"openshift,omitempty"`

	// Allows specifying the definition of a volume
	// shared by several other components
	// +optional
	Volume *VolumeComponentPluginOverride `json:"volume,omitempty"`
}

// CommandUnionPluginOverride is a K8S-style union of the command kinds a
// plugin override can configure, discriminated by CommandType.
// +union
type CommandUnionPluginOverride struct {
	// +kubebuilder:validation:Enum=Exec;Apply;Composite
	// Type of devworkspace command
	// +unionDiscriminator
	// +optional
	CommandType CommandTypePluginOverride `json:"commandType,omitempty"`

	// CLI Command executed in an existing component container
	// +optional
	Exec *ExecCommandPluginOverride `json:"exec,omitempty"`

	// Command that consists in applying a given component definition,
	// typically bound to a devworkspace event.
	//
	// For example, when an `apply` command is bound to a `preStart` event,
	// and references a `container` component, it will start the container as a
	// K8S initContainer in the devworkspace POD, unless the component has its
	// `dedicatedPod` field set to `true`.
	//
	// When no `apply` command exist for a given component,
	// it is assumed the component will be applied at devworkspace start
	// by default.
	// +optional
	Apply *ApplyCommandPluginOverride `json:"apply,omitempty"`

	// Composite command that allows executing several sub-commands
	// either sequentially or concurrently
	// +optional
	Composite *CompositeCommandPluginOverride `json:"composite,omitempty"`
}
// ComponentType describes the type of component.
// Only one of the following component type may be specified.
type ComponentTypePluginOverride string

// Component that allows the developer to add a configured container into their devworkspace
type ContainerComponentPluginOverride struct {
	BaseComponentPluginOverride `json:",inline"`
	ContainerPluginOverride     `json:",inline"`
	Endpoints                   []EndpointPluginOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
}

// Component that allows partly importing Kubernetes resources into the devworkspace POD
type KubernetesComponentPluginOverride struct {
	K8sLikeComponentPluginOverride `json:",inline"`
}

// Component that allows partly importing Openshift resources into the devworkspace POD
type OpenshiftComponentPluginOverride struct {
	K8sLikeComponentPluginOverride `json:",inline"`
}

// Component that allows the developer to declare and configure a volume into their devworkspace
type VolumeComponentPluginOverride struct {
	BaseComponentPluginOverride `json:",inline"`
	VolumePluginOverride        `json:",inline"`
}

// CommandType describes the type of command.
// Only one of the following command type may be specified.
type CommandTypePluginOverride string

// ExecCommandPluginOverride is the plugin-override variant of ExecCommand.
type ExecCommandPluginOverride struct {
	LabeledCommandPluginOverride `json:",inline"`

	// +optional
	// The actual command-line string
	//
	// Special variables that can be used:
	//
	// - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping.
	//
	// - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/<project-name>). If there are multiple projects, this will point to the directory of the first one.
	CommandLine string `json:"commandLine,omitempty"`

	// +optional
	// Describes component to which given action relates
	//
	Component string `json:"component,omitempty"`

	// Working directory where the command should be executed
	//
	// Special variables that can be used:
	//
	// - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping.
	//
	// - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/<project-name>). If there are multiple projects, this will point to the directory of the first one.
	// +optional
	WorkingDir string `json:"workingDir,omitempty"`

	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// Optional list of environment variables that have to be set
	// before running the command
	Env []EnvVarPluginOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"`

	// +optional
	// Whether the command is capable to reload itself when source code changes.
	// If set to `true` the command won't be restarted and it is expected to handle file changes on its own.
	//
	// Default value is `false`
	HotReloadCapable bool `json:"hotReloadCapable,omitempty"`
}

// ApplyCommandPluginOverride is the plugin-override variant of ApplyCommand.
type ApplyCommandPluginOverride struct {
	LabeledCommandPluginOverride `json:",inline"`

	// +optional
	// Describes component that will be applied
	//
	Component string `json:"component,omitempty"`
}

// CompositeCommandPluginOverride is the plugin-override variant of CompositeCommand.
type CompositeCommandPluginOverride struct {
	LabeledCommandPluginOverride `json:",inline"`

	// The commands that comprise this composite command
	Commands []string `json:"commands,omitempty" patchStrategy:"replace"`

	// Indicates if the sub-commands should be executed concurrently
	// +optional
	Parallel bool `json:"parallel,omitempty"`
}

// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context
// to the devworkspace, in order to make working in it easier.
type BaseComponentPluginOverride struct {
}
// ContainerPluginOverride is the plugin-override variant of Container:
// the configuration of a devworkspace container.
type ContainerPluginOverride struct {
	// +optional
	Image string `json:"image,omitempty"`

	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// Environment variables used in this container.
	//
	// The following variables are reserved and cannot be overridden via env:
	//
	// - `$PROJECTS_ROOT`
	//
	// - `$PROJECT_SOURCE`
	Env []EnvVarPluginOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"`

	// +optional
	// List of volumes mounts that should be mounted in this container.
	VolumeMounts []VolumeMountPluginOverride `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name"`

	// +optional
	MemoryLimit string `json:"memoryLimit,omitempty"`

	// +optional
	MemoryRequest string `json:"memoryRequest,omitempty"`

	// +optional
	CpuLimit string `json:"cpuLimit,omitempty"`

	// +optional
	CpuRequest string `json:"cpuRequest,omitempty"`

	// The command to run in the dockerimage component instead of the default one provided in the image.
	//
	// Defaults to an empty array, meaning use whatever is defined in the image.
	// +optional
	Command []string `json:"command,omitempty" patchStrategy:"replace"`

	// The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command.
	//
	// Defaults to an empty array, meaning use whatever is defined in the image.
	// +optional
	Args []string `json:"args,omitempty" patchStrategy:"replace"`

	// Toggles whether or not the project source code should
	// be mounted in the component.
	//
	// Defaults to true for all component types except plugins and components that set `dedicatedPod` to true.
	// +optional
	MountSources *bool `json:"mountSources,omitempty"`

	// Optional specification of the path in the container where
	// project sources should be transferred/mounted when `mountSources` is `true`.
	// When omitted, the default value of /projects is used.
	// +optional
	SourceMapping string `json:"sourceMapping,omitempty"`

	// Specify if a container should run in its own separated pod,
	// instead of running as part of the main development environment pod.
	//
	// Default value is `false`
	// +optional
	DedicatedPod bool `json:"dedicatedPod,omitempty"`
}
// EndpointPluginOverride is the plugin-override variant of Endpoint.
type EndpointPluginOverride struct {
	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
	// +kubebuilder:validation:MaxLength=63
	Name string `json:"name"`

	// +optional
	TargetPort int `json:"targetPort,omitempty"`

	// Describes how the endpoint should be exposed on the network.
	//
	// - `public` means that the endpoint will be exposed on the public network, typically through
	// a K8S ingress or an OpenShift route.
	//
	// - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD,
	// typically by K8S services, to be consumed by other elements running
	// on the same cloud internal network.
	//
	// - `none` means that the endpoint will not be exposed and will only be accessible
	// inside the main devworkspace POD, on a local address.
	//
	// Default value is `public`
	// +optional
	Exposure EndpointExposurePluginOverride `json:"exposure,omitempty"`

	// Describes the application and transport protocols of the traffic that will go through this endpoint.
	//
	// - `http`: Endpoint will have `http` traffic, typically on a TCP connection.
	// It will be automatically promoted to `https` when the `secure` field is set to `true`.
	//
	// - `https`: Endpoint will have `https` traffic, typically on a TCP connection.
	//
	// - `ws`: Endpoint will have `ws` traffic, typically on a TCP connection.
	// It will be automatically promoted to `wss` when the `secure` field is set to `true`.
	//
	// - `wss`: Endpoint will have `wss` traffic, typically on a TCP connection.
	//
	// - `tcp`: Endpoint will have traffic on a TCP connection, without specifying an application protocol.
	//
	// - `udp`: Endpoint will have traffic on an UDP connection, without specifying an application protocol.
	//
	// Default value is `http`
	// +optional
	Protocol EndpointProtocolPluginOverride `json:"protocol,omitempty"`

	// Describes whether the endpoint should be secured and protected by some
	// authentication process. This requires a protocol of `https` or `wss`.
	// +optional
	Secure bool `json:"secure,omitempty"`

	// Path of the endpoint URL
	// +optional
	Path string `json:"path,omitempty"`

	// Map of implementation-dependant string-based free-form attributes.
	//
	// Examples of Che-specific attributes:
	//
	// - cookiesAuthEnabled: "true" / "false",
	//
	// - type: "terminal" / "ide" / "ide-dev",
	// +optional
	// +kubebuilder:validation:Type=object
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Attributes attributes.Attributes `json:"attributes,omitempty"`
}
// K8sLikeComponentPluginOverride is the plugin-override variant of K8sLikeComponent.
type K8sLikeComponentPluginOverride struct {
	BaseComponentPluginOverride            `json:",inline"`
	K8sLikeComponentLocationPluginOverride `json:",inline"`
	Endpoints                              []EndpointPluginOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
}

// Volume that should be mounted to a component container
type VolumePluginOverride struct {
	// +optional
	// Size of the volume
	Size string `json:"size,omitempty"`

	// +optional
	// Ephemeral volumes are not stored persistently across restarts. Defaults
	// to false
	Ephemeral bool `json:"ephemeral,omitempty"`
}

// LabeledCommandPluginOverride is the plugin-override variant of LabeledCommand.
type LabeledCommandPluginOverride struct {
	BaseCommandPluginOverride `json:",inline"`

	// +optional
	// Optional label that provides a label for this command
	// to be used in Editor UI menus for example
	Label string `json:"label,omitempty"`
}

// EnvVarPluginOverride is the plugin-override variant of EnvVar: a name/value pair.
type EnvVarPluginOverride struct {
	Name string `json:"name" yaml:"name"`
	// +optional
	Value string `json:"value,omitempty" yaml:"value"`
}

// Volume that should be mounted to a component container
type VolumeMountPluginOverride struct {
	// The volume mount name is the name of an existing `Volume` component.
	// If several containers mount the same volume name
	// then they will reuse the same volume and will be able to access to the same files.
	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
	// +kubebuilder:validation:MaxLength=63
	Name string `json:"name"`

	// The path in the component container where the volume should be mounted.
	// If no path is mentioned, the default path is `/<name>`.
	// +optional
	Path string `json:"path,omitempty"`
}
// EndpointExposure describes the way an endpoint is exposed on the network.
// Only one of the following exposures may be specified: public, internal, none.
// +kubebuilder:validation:Enum=public;internal;none
type EndpointExposurePluginOverride string

// EndpointProtocol defines the application and transport protocols of the traffic that will go through this endpoint.
// Only one of the following protocols may be specified: http, ws, tcp, udp.
// +kubebuilder:validation:Enum=http;https;ws;wss;tcp;udp
type EndpointProtocolPluginOverride string

// K8sLikeComponentLocationPluginOverride is a K8S-style union: the manifest
// comes either from Uri or Inlined, discriminated by LocationType.
// +union
type K8sLikeComponentLocationPluginOverride struct {
	// +kubebuilder:validation:Enum=Uri;Inlined
	// Type of Kubernetes-like location
	// +
	// +unionDiscriminator
	// +optional
	LocationType K8sLikeComponentLocationTypePluginOverride `json:"locationType,omitempty"`

	// Location in a file fetched from a uri.
	// +optional
	Uri string `json:"uri,omitempty"`

	// Inlined manifest
	// +optional
	Inlined string `json:"inlined,omitempty"`
}

// BaseCommandPluginOverride is the plugin-override variant of BaseCommand.
type BaseCommandPluginOverride struct {
	// +optional
	// Defines the group this command is part of
	Group *CommandGroupPluginOverride `json:"group,omitempty"`
}

// K8sLikeComponentLocationType describes the type of
// the location the configuration is fetched from.
// Only one of the following component type may be specified.
type K8sLikeComponentLocationTypePluginOverride string

// CommandGroupPluginOverride is the plugin-override variant of CommandGroup.
type CommandGroupPluginOverride struct {
	// +optional
	// Kind of group the command is part of
	Kind CommandGroupKindPluginOverride `json:"kind,omitempty"`

	// +optional
	// Identifies the default command for a given group kind
	IsDefault bool `json:"isDefault,omitempty"`
}

// CommandGroupKind describes the kind of command group.
// +kubebuilder:validation:Enum=build;run;test;debug
type CommandGroupKindPluginOverride string

// isOverride marks PluginOverrides as an implementation of the Overrides interface.
func (overrides PluginOverrides) isOverride() {}

View File

@ -0,0 +1,33 @@
package v1alpha2
// GetToplevelLists returns all four devfile top-level Keyed lists of the
// template spec content: components, projects, starter projects and commands.
func (container DevWorkspaceTemplateSpecContent) GetToplevelLists() TopLevelLists {
	return TopLevelLists{
		"Components":      extractKeys(container.Components),
		"Projects":        extractKeys(container.Projects),
		"StarterProjects": extractKeys(container.StarterProjects),
		"Commands":        extractKeys(container.Commands),
	}
}

// GetToplevelLists returns the top-level Keyed lists a parent override can
// carry: components, projects, starter projects and commands.
func (container ParentOverrides) GetToplevelLists() TopLevelLists {
	return TopLevelLists{
		"Components":      extractKeys(container.Components),
		"Projects":        extractKeys(container.Projects),
		"StarterProjects": extractKeys(container.StarterProjects),
		"Commands":        extractKeys(container.Commands),
	}
}

// GetToplevelLists returns the top-level Keyed lists a plugin override (inside
// a parent override) can carry: only components and commands.
func (container PluginOverridesParentOverride) GetToplevelLists() TopLevelLists {
	return TopLevelLists{
		"Components": extractKeys(container.Components),
		"Commands":   extractKeys(container.Commands),
	}
}

// GetToplevelLists returns the top-level Keyed lists a plugin override can
// carry: only components and commands.
func (container PluginOverrides) GetToplevelLists() TopLevelLists {
	return TopLevelLists{
		"Components": extractKeys(container.Components),
		"Commands":   extractKeys(container.Commands),
	}
}

View File

@ -0,0 +1,360 @@
package v1alpha2
import (
"reflect"
)
// commandUnion caches the reflect.Type of CommandUnionVisitor for the generic
// union helpers (visitUnion / normalizeUnion / simplifyUnion).
var commandUnion reflect.Type = reflect.TypeOf(CommandUnionVisitor{})

// Visit invokes the visitor callback matching the union member that is set.
func (union CommandUnion) Visit(visitor CommandUnionVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes CommandType as the union discriminator.
func (union *CommandUnion) discriminator() *string {
	return (*string)(&union.CommandType)
}

// Normalize implements Union.
func (union *CommandUnion) Normalize() error {
	return normalizeUnion(union, commandUnion)
}

// Simplify implements Union.
func (union *CommandUnion) Simplify() {
	simplifyUnion(union, commandUnion)
}

// CommandUnionVisitor holds one callback per possible CommandUnion member.
// +k8s:deepcopy-gen=false
type CommandUnionVisitor struct {
	Exec      func(*ExecCommand) error
	Apply     func(*ApplyCommand) error
	Composite func(*CompositeCommand) error
	Custom    func(*CustomCommand) error
}

// k8sLikeComponentLocation caches the visitor type for the union helpers.
var k8sLikeComponentLocation reflect.Type = reflect.TypeOf(K8sLikeComponentLocationVisitor{})

// Visit invokes the visitor callback matching the union member that is set.
func (union K8sLikeComponentLocation) Visit(visitor K8sLikeComponentLocationVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes LocationType as the union discriminator.
func (union *K8sLikeComponentLocation) discriminator() *string {
	return (*string)(&union.LocationType)
}

// Normalize implements Union.
func (union *K8sLikeComponentLocation) Normalize() error {
	return normalizeUnion(union, k8sLikeComponentLocation)
}

// Simplify implements Union.
func (union *K8sLikeComponentLocation) Simplify() {
	simplifyUnion(union, k8sLikeComponentLocation)
}

// K8sLikeComponentLocationVisitor holds one callback per possible location member.
// +k8s:deepcopy-gen=false
type K8sLikeComponentLocationVisitor struct {
	Uri     func(string) error
	Inlined func(string) error
}

// componentUnion caches the visitor type for the union helpers.
var componentUnion reflect.Type = reflect.TypeOf(ComponentUnionVisitor{})

// Visit invokes the visitor callback matching the union member that is set.
func (union ComponentUnion) Visit(visitor ComponentUnionVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes ComponentType as the union discriminator.
func (union *ComponentUnion) discriminator() *string {
	return (*string)(&union.ComponentType)
}

// Normalize implements Union.
func (union *ComponentUnion) Normalize() error {
	return normalizeUnion(union, componentUnion)
}

// Simplify implements Union.
func (union *ComponentUnion) Simplify() {
	simplifyUnion(union, componentUnion)
}

// ComponentUnionVisitor holds one callback per possible ComponentUnion member.
// +k8s:deepcopy-gen=false
type ComponentUnionVisitor struct {
	Container  func(*ContainerComponent) error
	Kubernetes func(*KubernetesComponent) error
	Openshift  func(*OpenshiftComponent) error
	Volume     func(*VolumeComponent) error
	Plugin     func(*PluginComponent) error
	Custom     func(*CustomComponent) error
}

// importReferenceUnion caches the visitor type for the union helpers.
var importReferenceUnion reflect.Type = reflect.TypeOf(ImportReferenceUnionVisitor{})

// Visit invokes the visitor callback matching the union member that is set.
func (union ImportReferenceUnion) Visit(visitor ImportReferenceUnionVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes ImportReferenceType as the union discriminator.
func (union *ImportReferenceUnion) discriminator() *string {
	return (*string)(&union.ImportReferenceType)
}
func (union *ImportReferenceUnion) Normalize() error {
return normalizeUnion(union, importReferenceUnion)
}
func (union *ImportReferenceUnion) Simplify() {
simplifyUnion(union, importReferenceUnion)
}
// +k8s:deepcopy-gen=false
type ImportReferenceUnionVisitor struct {
Uri func(string) error
Id func(string) error
Kubernetes func(*KubernetesCustomResourceImportReference) error
}
var projectSource reflect.Type = reflect.TypeOf(ProjectSourceVisitor{})
func (union ProjectSource) Visit(visitor ProjectSourceVisitor) error {
return visitUnion(union, visitor)
}
func (union *ProjectSource) discriminator() *string {
return (*string)(&union.SourceType)
}
func (union *ProjectSource) Normalize() error {
return normalizeUnion(union, projectSource)
}
func (union *ProjectSource) Simplify() {
simplifyUnion(union, projectSource)
}
// +k8s:deepcopy-gen=false
type ProjectSourceVisitor struct {
Git func(*GitProjectSource) error
Zip func(*ZipProjectSource) error
Custom func(*CustomProjectSource) error
}
// The parent-override unions below each pair a cached reflect.Type of the
// visitor struct with Visit / discriminator / Normalize / Simplify methods
// delegating to the shared visitUnion / normalizeUnion / simplifyUnion
// helpers defined elsewhere in this package.

// componentUnionParentOverride caches the reflected type of ComponentUnionParentOverrideVisitor.
var componentUnionParentOverride reflect.Type = reflect.TypeOf(ComponentUnionParentOverrideVisitor{})

// Visit delegates to the shared visitUnion helper with this union's visitor.
func (union ComponentUnionParentOverride) Visit(visitor ComponentUnionParentOverrideVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes the union's discriminator field as a *string.
func (union *ComponentUnionParentOverride) discriminator() *string {
	return (*string)(&union.ComponentType)
}

// Normalize delegates to the shared normalizeUnion helper.
func (union *ComponentUnionParentOverride) Normalize() error {
	return normalizeUnion(union, componentUnionParentOverride)
}

// Simplify delegates to the shared simplifyUnion helper.
func (union *ComponentUnionParentOverride) Simplify() {
	simplifyUnion(union, componentUnionParentOverride)
}

// ComponentUnionParentOverrideVisitor holds one callback per possible component variant.
// +k8s:deepcopy-gen=false
type ComponentUnionParentOverrideVisitor struct {
	Container  func(*ContainerComponentParentOverride) error
	Kubernetes func(*KubernetesComponentParentOverride) error
	Openshift  func(*OpenshiftComponentParentOverride) error
	Volume     func(*VolumeComponentParentOverride) error
	Plugin     func(*PluginComponentParentOverride) error
}

// projectSourceParentOverride caches the reflected type of ProjectSourceParentOverrideVisitor.
var projectSourceParentOverride reflect.Type = reflect.TypeOf(ProjectSourceParentOverrideVisitor{})

// Visit delegates to the shared visitUnion helper with this union's visitor.
func (union ProjectSourceParentOverride) Visit(visitor ProjectSourceParentOverrideVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes the union's discriminator field as a *string.
func (union *ProjectSourceParentOverride) discriminator() *string {
	return (*string)(&union.SourceType)
}

// Normalize delegates to the shared normalizeUnion helper.
func (union *ProjectSourceParentOverride) Normalize() error {
	return normalizeUnion(union, projectSourceParentOverride)
}

// Simplify delegates to the shared simplifyUnion helper.
func (union *ProjectSourceParentOverride) Simplify() {
	simplifyUnion(union, projectSourceParentOverride)
}

// ProjectSourceParentOverrideVisitor holds one callback per possible project-source variant.
// +k8s:deepcopy-gen=false
type ProjectSourceParentOverrideVisitor struct {
	Git func(*GitProjectSourceParentOverride) error
	Zip func(*ZipProjectSourceParentOverride) error
}

// commandUnionParentOverride caches the reflected type of CommandUnionParentOverrideVisitor.
var commandUnionParentOverride reflect.Type = reflect.TypeOf(CommandUnionParentOverrideVisitor{})

// Visit delegates to the shared visitUnion helper with this union's visitor.
func (union CommandUnionParentOverride) Visit(visitor CommandUnionParentOverrideVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes the union's discriminator field as a *string.
func (union *CommandUnionParentOverride) discriminator() *string {
	return (*string)(&union.CommandType)
}

// Normalize delegates to the shared normalizeUnion helper.
func (union *CommandUnionParentOverride) Normalize() error {
	return normalizeUnion(union, commandUnionParentOverride)
}

// Simplify delegates to the shared simplifyUnion helper.
func (union *CommandUnionParentOverride) Simplify() {
	simplifyUnion(union, commandUnionParentOverride)
}

// CommandUnionParentOverrideVisitor holds one callback per possible command variant.
// +k8s:deepcopy-gen=false
type CommandUnionParentOverrideVisitor struct {
	Exec      func(*ExecCommandParentOverride) error
	Apply     func(*ApplyCommandParentOverride) error
	Composite func(*CompositeCommandParentOverride) error
}
// The override unions below each pair a cached reflect.Type of the visitor
// struct with Visit / discriminator / Normalize / Simplify methods
// delegating to the shared visitUnion / normalizeUnion / simplifyUnion
// helpers defined elsewhere in this package.

// k8sLikeComponentLocationParentOverride caches the reflected type of K8sLikeComponentLocationParentOverrideVisitor.
var k8sLikeComponentLocationParentOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationParentOverrideVisitor{})

// Visit delegates to the shared visitUnion helper with this union's visitor.
func (union K8sLikeComponentLocationParentOverride) Visit(visitor K8sLikeComponentLocationParentOverrideVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes the union's discriminator field as a *string.
func (union *K8sLikeComponentLocationParentOverride) discriminator() *string {
	return (*string)(&union.LocationType)
}

// Normalize delegates to the shared normalizeUnion helper.
func (union *K8sLikeComponentLocationParentOverride) Normalize() error {
	return normalizeUnion(union, k8sLikeComponentLocationParentOverride)
}

// Simplify delegates to the shared simplifyUnion helper.
func (union *K8sLikeComponentLocationParentOverride) Simplify() {
	simplifyUnion(union, k8sLikeComponentLocationParentOverride)
}

// K8sLikeComponentLocationParentOverrideVisitor holds one callback per possible location variant.
// +k8s:deepcopy-gen=false
type K8sLikeComponentLocationParentOverrideVisitor struct {
	Uri     func(string) error
	Inlined func(string) error
}

// importReferenceUnionParentOverride caches the reflected type of ImportReferenceUnionParentOverrideVisitor.
var importReferenceUnionParentOverride reflect.Type = reflect.TypeOf(ImportReferenceUnionParentOverrideVisitor{})

// Visit delegates to the shared visitUnion helper with this union's visitor.
func (union ImportReferenceUnionParentOverride) Visit(visitor ImportReferenceUnionParentOverrideVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes the union's discriminator field as a *string.
func (union *ImportReferenceUnionParentOverride) discriminator() *string {
	return (*string)(&union.ImportReferenceType)
}

// Normalize delegates to the shared normalizeUnion helper.
func (union *ImportReferenceUnionParentOverride) Normalize() error {
	return normalizeUnion(union, importReferenceUnionParentOverride)
}

// Simplify delegates to the shared simplifyUnion helper.
func (union *ImportReferenceUnionParentOverride) Simplify() {
	simplifyUnion(union, importReferenceUnionParentOverride)
}

// ImportReferenceUnionParentOverrideVisitor holds one callback per possible import-reference variant.
// +k8s:deepcopy-gen=false
type ImportReferenceUnionParentOverrideVisitor struct {
	Uri        func(string) error
	Id         func(string) error
	Kubernetes func(*KubernetesCustomResourceImportReferenceParentOverride) error
}

// componentUnionPluginOverrideParentOverride caches the reflected type of ComponentUnionPluginOverrideParentOverrideVisitor.
var componentUnionPluginOverrideParentOverride reflect.Type = reflect.TypeOf(ComponentUnionPluginOverrideParentOverrideVisitor{})

// Visit delegates to the shared visitUnion helper with this union's visitor.
func (union ComponentUnionPluginOverrideParentOverride) Visit(visitor ComponentUnionPluginOverrideParentOverrideVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes the union's discriminator field as a *string.
func (union *ComponentUnionPluginOverrideParentOverride) discriminator() *string {
	return (*string)(&union.ComponentType)
}

// Normalize delegates to the shared normalizeUnion helper.
func (union *ComponentUnionPluginOverrideParentOverride) Normalize() error {
	return normalizeUnion(union, componentUnionPluginOverrideParentOverride)
}

// Simplify delegates to the shared simplifyUnion helper.
func (union *ComponentUnionPluginOverrideParentOverride) Simplify() {
	simplifyUnion(union, componentUnionPluginOverrideParentOverride)
}

// ComponentUnionPluginOverrideParentOverrideVisitor holds one callback per possible component variant.
// +k8s:deepcopy-gen=false
type ComponentUnionPluginOverrideParentOverrideVisitor struct {
	Container  func(*ContainerComponentPluginOverrideParentOverride) error
	Kubernetes func(*KubernetesComponentPluginOverrideParentOverride) error
	Openshift  func(*OpenshiftComponentPluginOverrideParentOverride) error
	Volume     func(*VolumeComponentPluginOverrideParentOverride) error
}

// commandUnionPluginOverrideParentOverride caches the reflected type of CommandUnionPluginOverrideParentOverrideVisitor.
var commandUnionPluginOverrideParentOverride reflect.Type = reflect.TypeOf(CommandUnionPluginOverrideParentOverrideVisitor{})

// Visit delegates to the shared visitUnion helper with this union's visitor.
func (union CommandUnionPluginOverrideParentOverride) Visit(visitor CommandUnionPluginOverrideParentOverrideVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes the union's discriminator field as a *string.
func (union *CommandUnionPluginOverrideParentOverride) discriminator() *string {
	return (*string)(&union.CommandType)
}

// Normalize delegates to the shared normalizeUnion helper.
func (union *CommandUnionPluginOverrideParentOverride) Normalize() error {
	return normalizeUnion(union, commandUnionPluginOverrideParentOverride)
}

// Simplify delegates to the shared simplifyUnion helper.
func (union *CommandUnionPluginOverrideParentOverride) Simplify() {
	simplifyUnion(union, commandUnionPluginOverrideParentOverride)
}

// CommandUnionPluginOverrideParentOverrideVisitor holds one callback per possible command variant.
// +k8s:deepcopy-gen=false
type CommandUnionPluginOverrideParentOverrideVisitor struct {
	Exec      func(*ExecCommandPluginOverrideParentOverride) error
	Apply     func(*ApplyCommandPluginOverrideParentOverride) error
	Composite func(*CompositeCommandPluginOverrideParentOverride) error
}
// The plugin-override unions below each pair a cached reflect.Type of the
// visitor struct with Visit / discriminator / Normalize / Simplify methods
// delegating to the shared visitUnion / normalizeUnion / simplifyUnion
// helpers defined elsewhere in this package.

// k8sLikeComponentLocationPluginOverrideParentOverride caches the reflected type of K8sLikeComponentLocationPluginOverrideParentOverrideVisitor.
var k8sLikeComponentLocationPluginOverrideParentOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationPluginOverrideParentOverrideVisitor{})

// Visit delegates to the shared visitUnion helper with this union's visitor.
func (union K8sLikeComponentLocationPluginOverrideParentOverride) Visit(visitor K8sLikeComponentLocationPluginOverrideParentOverrideVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes the union's discriminator field as a *string.
func (union *K8sLikeComponentLocationPluginOverrideParentOverride) discriminator() *string {
	return (*string)(&union.LocationType)
}

// Normalize delegates to the shared normalizeUnion helper.
func (union *K8sLikeComponentLocationPluginOverrideParentOverride) Normalize() error {
	return normalizeUnion(union, k8sLikeComponentLocationPluginOverrideParentOverride)
}

// Simplify delegates to the shared simplifyUnion helper.
func (union *K8sLikeComponentLocationPluginOverrideParentOverride) Simplify() {
	simplifyUnion(union, k8sLikeComponentLocationPluginOverrideParentOverride)
}

// K8sLikeComponentLocationPluginOverrideParentOverrideVisitor holds one callback per possible location variant.
// +k8s:deepcopy-gen=false
type K8sLikeComponentLocationPluginOverrideParentOverrideVisitor struct {
	Uri     func(string) error
	Inlined func(string) error
}

// componentUnionPluginOverride caches the reflected type of ComponentUnionPluginOverrideVisitor.
var componentUnionPluginOverride reflect.Type = reflect.TypeOf(ComponentUnionPluginOverrideVisitor{})

// Visit delegates to the shared visitUnion helper with this union's visitor.
func (union ComponentUnionPluginOverride) Visit(visitor ComponentUnionPluginOverrideVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes the union's discriminator field as a *string.
func (union *ComponentUnionPluginOverride) discriminator() *string {
	return (*string)(&union.ComponentType)
}

// Normalize delegates to the shared normalizeUnion helper.
func (union *ComponentUnionPluginOverride) Normalize() error {
	return normalizeUnion(union, componentUnionPluginOverride)
}

// Simplify delegates to the shared simplifyUnion helper.
func (union *ComponentUnionPluginOverride) Simplify() {
	simplifyUnion(union, componentUnionPluginOverride)
}

// ComponentUnionPluginOverrideVisitor holds one callback per possible component variant.
// +k8s:deepcopy-gen=false
type ComponentUnionPluginOverrideVisitor struct {
	Container  func(*ContainerComponentPluginOverride) error
	Kubernetes func(*KubernetesComponentPluginOverride) error
	Openshift  func(*OpenshiftComponentPluginOverride) error
	Volume     func(*VolumeComponentPluginOverride) error
}

// commandUnionPluginOverride caches the reflected type of CommandUnionPluginOverrideVisitor.
var commandUnionPluginOverride reflect.Type = reflect.TypeOf(CommandUnionPluginOverrideVisitor{})

// Visit delegates to the shared visitUnion helper with this union's visitor.
func (union CommandUnionPluginOverride) Visit(visitor CommandUnionPluginOverrideVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes the union's discriminator field as a *string.
func (union *CommandUnionPluginOverride) discriminator() *string {
	return (*string)(&union.CommandType)
}

// Normalize delegates to the shared normalizeUnion helper.
func (union *CommandUnionPluginOverride) Normalize() error {
	return normalizeUnion(union, commandUnionPluginOverride)
}

// Simplify delegates to the shared simplifyUnion helper.
func (union *CommandUnionPluginOverride) Simplify() {
	simplifyUnion(union, commandUnionPluginOverride)
}

// CommandUnionPluginOverrideVisitor holds one callback per possible command variant.
// +k8s:deepcopy-gen=false
type CommandUnionPluginOverrideVisitor struct {
	Exec      func(*ExecCommandPluginOverride) error
	Apply     func(*ApplyCommandPluginOverride) error
	Composite func(*CompositeCommandPluginOverride) error
}

// k8sLikeComponentLocationPluginOverride caches the reflected type of K8sLikeComponentLocationPluginOverrideVisitor.
var k8sLikeComponentLocationPluginOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationPluginOverrideVisitor{})

// Visit delegates to the shared visitUnion helper with this union's visitor.
func (union K8sLikeComponentLocationPluginOverride) Visit(visitor K8sLikeComponentLocationPluginOverrideVisitor) error {
	return visitUnion(union, visitor)
}

// discriminator exposes the union's discriminator field as a *string.
func (union *K8sLikeComponentLocationPluginOverride) discriminator() *string {
	return (*string)(&union.LocationType)
}

// Normalize delegates to the shared normalizeUnion helper.
func (union *K8sLikeComponentLocationPluginOverride) Normalize() error {
	return normalizeUnion(union, k8sLikeComponentLocationPluginOverride)
}

// Simplify delegates to the shared simplifyUnion helper.
func (union *K8sLikeComponentLocationPluginOverride) Simplify() {
	simplifyUnion(union, k8sLikeComponentLocationPluginOverride)
}

// K8sLikeComponentLocationPluginOverrideVisitor holds one callback per possible location variant.
// +k8s:deepcopy-gen=false
type K8sLikeComponentLocationPluginOverrideVisitor struct {
	Uri     func(string) error
	Inlined func(string) error
}

View File

@ -0,0 +1,453 @@
package attributes
import (
"encoding/json"
"strconv"
apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)
// Attributes provides a way to add a map of arbitrary YAML/JSON
// objects. Values are stored as raw JSON (apiext.JSON) and decoded
// lazily by the accessor methods below.
// +kubebuilder:validation:Type=object
// +kubebuilder:validation:XPreserveUnknownFields
type Attributes map[string]apiext.JSON
// MarshalJSON implements json.Marshaler so free-form attributes
// serialize as a plain JSON object.
func (attributes Attributes) MarshalJSON() ([]byte, error) {
	asMap := map[string]apiext.JSON(attributes)
	return json.Marshal(asMap)
}

// UnmarshalJSON implements json.Unmarshaler so free-form attributes
// deserialize from a plain JSON object.
func (attributes *Attributes) UnmarshalJSON(data []byte) error {
	asMap := (*map[string]apiext.JSON)(attributes)
	return json.Unmarshal(data, asMap)
}

// Exists reports whether an attribute with the given key
// is present in the attributes map.
func (attributes Attributes) Exists(key string) bool {
	_, found := attributes[key]
	return found
}
// convertPrimitiveFunc is the retry hook used by getPrimitive: given the
// JSON type actually found under the key, it attempts to convert that
// value to the requested primitive type.
type convertPrimitiveFunc func(attributes Attributes, key string, attributeType string) (interface{}, error)

// getPrimitive decodes the attribute under key as the primitive kind named
// by resultType ("string", "boolean" or "number"). On a JSON type mismatch
// it retries through the supplied convert hook. On failure the original
// decode (or key-not-found) error is written to errorHolder, if provided,
// and zeroValue is returned.
func (attributes Attributes) getPrimitive(key string, zeroValue interface{}, resultType string, convert convertPrimitiveFunc, errorHolder *error) interface{} {
	var err error
	if attribute, exists := attributes[key]; exists {
		var result interface{}
		// First try a direct decode into the requested primitive type.
		switch resultType {
		case "string":
			primitiveResult := new(string)
			err = json.Unmarshal(attribute.Raw, primitiveResult)
			result = *primitiveResult
		case "boolean":
			primitiveResult := new(bool)
			err = json.Unmarshal(attribute.Raw, primitiveResult)
			result = *primitiveResult
		case "number":
			primitiveResult := new(float64)
			err = json.Unmarshal(attribute.Raw, primitiveResult)
			result = *primitiveResult
		}
		if err == nil {
			return result
		}
		// A type mismatch surfaces as *json.UnmarshalTypeError, whose Value
		// names the JSON type found; retry through the converter. When the
		// conversion succeeds, no error is reported to errorHolder.
		switch typeError := err.(type) {
		case *json.UnmarshalTypeError:
			convertedValue, retryError := convert(attributes, key, typeError.Value)
			if retryError == nil && convertedValue != nil {
				return convertedValue
			}
		}
	} else {
		err = &KeyNotFoundError{Key: key}
	}
	// Failure path: surface the original error (not the retry error) and
	// return the caller-supplied zero value.
	if errorHolder != nil {
		*errorHolder = err
	}
	return zeroValue
}
// GetString allows returning the attribute with the given key
// as a string. If the attribute JSON/YAML content is
// not a JSON string (or a primitive type that can be converted into a string),
// then the result will be the empty string and an error will be raised.
//
// An optional error holder can be passed as an argument
// to receive any error that might have been raised during the attribute
// decoding.
func (attributes Attributes) GetString(key string, errorHolder *error) string {
	fromOtherType := func(attrs Attributes, attrKey string, attributeType string) (interface{}, error) {
		var converted interface{}
		var convErr error
		switch attributeType {
		case "bool":
			converted = strconv.FormatBool(attrs.GetBoolean(attrKey, &convErr))
		case "number":
			converted = strconv.FormatFloat(attrs.GetNumber(attrKey, &convErr), 'g', -1, 64)
		}
		return converted, convErr
	}
	return attributes.getPrimitive(key, "", "string", fromOtherType, errorHolder).(string)
}

// GetNumber allows returning the attribute with the given key
// as a float64. If the attribute JSON/YAML content is
// not a JSON number (or a JSON string that can be converted into a JSON number),
// then the result will be the zero value and an error is raised.
//
// An optional error holder can be passed as an argument
// to receive any error that might have been raised during the attribute
// decoding.
func (attributes Attributes) GetNumber(key string, errorHolder *error) float64 {
	fromString := func(attrs Attributes, attrKey string, attributeType string) (interface{}, error) {
		var converted interface{}
		var convErr error
		if attributeType == "string" {
			var parseErr error
			converted, parseErr = strconv.ParseFloat(attrs.GetString(attrKey, &convErr), 64)
			if convErr == nil {
				convErr = parseErr
			}
		}
		return converted, convErr
	}
	return attributes.getPrimitive(key, 0.0, "number", fromString, errorHolder).(float64)
}

// GetBoolean allows returning the attribute with the given key
// as a bool. If the attribute JSON/YAML content is
// not a JSON boolean (or a JSON string that can be converted into a JSON boolean),
// then the result will be the `false` zero value and an error is raised.
//
// String values can be converted to boolean values according to the following rules:
//
//   - strings "1", "t", "T", "TRUE", "true", and "True" will be converted to a `true` boolean
//
//   - strings "0", "f", "F", "FALSE", "false", "False" will be converted to a `false` boolean
//
//   - any other string value will raise an error.
//
// An optional error holder can be passed as an argument
// to receive any error that might have been raised during the attribute
// decoding.
func (attributes Attributes) GetBoolean(key string, errorHolder *error) bool {
	fromString := func(attrs Attributes, attrKey string, attributeType string) (interface{}, error) {
		var converted interface{}
		var convErr error
		if attributeType == "string" {
			var parseErr error
			converted, parseErr = strconv.ParseBool(attrs.GetString(attrKey, &convErr))
			if convErr == nil {
				convErr = parseErr
			}
		}
		return converted, convErr
	}
	return attributes.getPrimitive(key, false, "boolean", fromString, errorHolder).(bool)
}
// Get allows returning the attribute with the given key
// as an interface. The underlying type of the returned interface
// depends on the JSON/YAML content of the attribute. It can be either a simple type
// like a string, a float64 or a bool, either a structured type like
// a map of interfaces or an array of interfaces.
//
// nil is returned when the key does not exist or the content cannot be decoded.
//
// An optional error holder can be passed as an argument
// to receive any error that might have occurred during the attribute
// decoding.
func (attributes Attributes) Get(key string, errorHolder *error) interface{} {
	attribute, exists := attributes[key]
	if !exists {
		// Guard clause replaces the original `else if !exists` branch,
		// whose extra !exists test was always true and thus redundant.
		if errorHolder != nil {
			*errorHolder = &KeyNotFoundError{Key: key}
		}
		return nil
	}
	// Wrap the raw JSON in an array so that any JSON value (object,
	// array or primitive) decodes through a single code path.
	container := &[]interface{}{}
	err := json.Unmarshal([]byte("[ "+string(attribute.Raw)+" ]"), container)
	if err != nil && errorHolder != nil {
		*errorHolder = err
	}
	if len(*container) > 0 {
		return (*container)[0]
	}
	return nil
}
// GetInto allows decoding the attribute with the given key
// into a given interface. The provided interface should be a pointer
// to a struct, to an array, or to any simple type.
//
// An error is returned if the key is missing or if the provided interface
// type is not compatible with the attribute content.
func (attributes Attributes) GetInto(key string, into interface{}) error {
	attribute, exists := attributes[key]
	if !exists {
		return &KeyNotFoundError{Key: key}
	}
	return json.Unmarshal(attribute.Raw, into)
}
// Strings allows returning only the attributes whose content
// is a JSON string.
//
// An optional error holder can be passed as an argument
// to receive any error that might have been raised during the attribute
// decoding.
func (attributes Attributes) Strings(errorHolder *error) map[string]string {
	result := make(map[string]string)
	for key := range attributes {
		// Only the last decoding error is kept in the holder; this keeps
		// the code simple and avoids an external multi-error dependency.
		value, ok := attributes.Get(key, errorHolder).(string)
		if ok {
			result[key] = value
		}
	}
	return result
}

// Numbers allows returning only the attributes whose content
// is a JSON number.
//
// An optional error holder can be passed as an argument
// to receive any error that might have been raised during the attribute
// decoding.
func (attributes Attributes) Numbers(errorHolder *error) map[string]float64 {
	result := make(map[string]float64)
	for key := range attributes {
		// Only the last decoding error is kept in the holder; this keeps
		// the code simple and avoids an external multi-error dependency.
		value, ok := attributes.Get(key, errorHolder).(float64)
		if ok {
			result[key] = value
		}
	}
	return result
}

// Booleans allows returning only the attributes whose content
// is a JSON boolean.
//
// An optional error holder can be passed as an argument
// to receive any error that might have been raised during the attribute
// decoding.
func (attributes Attributes) Booleans(errorHolder *error) map[string]bool {
	result := make(map[string]bool)
	for key := range attributes {
		// Only the last decoding error is kept in the holder; this keeps
		// the code simple and avoids an external multi-error dependency.
		value, ok := attributes.Get(key, errorHolder).(bool)
		if ok {
			result[key] = value
		}
	}
	return result
}
// Into allows decoding the whole attributes map
// into a given interface. The provided interface should be either a pointer
// to a struct, or to a map.
//
// An error is returned if the provided interface type is not compatible
// with the structure of the attributes.
func (attributes Attributes) Into(into interface{}) error {
	if attributes == nil {
		return nil
	}
	rawJSON, err := json.Marshal(attributes)
	if err != nil {
		return err
	}
	return json.Unmarshal(rawJSON, into)
}
// AsInterface allows returning the whole attributes map
// as an interface. When the attributes are not empty,
// the returned interface will be a map
// of interfaces.
//
// An optional error holder can be passed as an argument
// to receive any error that might have occurred during the attributes
// decoding.
func (attributes Attributes) AsInterface(errorHolder *error) interface{} {
	rawJSON, err := json.Marshal(attributes)
	if err != nil {
		// Always abort on error, even without an error holder: the
		// original only returned early when errorHolder was non-nil and
		// could otherwise panic indexing the empty container below.
		if errorHolder != nil {
			*errorHolder = err
		}
		return nil
	}
	// Wrap in an array so that any JSON value decodes through one code path.
	container := &[]interface{}{}
	err = json.Unmarshal([]byte("[ "+string(rawJSON)+" ]"), container)
	if err != nil {
		if errorHolder != nil {
			*errorHolder = err
		}
		return nil
	}
	return (*container)[0]
}
// PutString allows adding a string attribute to the
// current map of attributes.
func (attributes Attributes) PutString(key string, value string) Attributes {
	raw, _ := json.Marshal(value) // marshalling a plain string cannot fail
	attributes[key] = apiext.JSON{Raw: raw}
	return attributes
}

// FromStringMap allows adding into the current map of attributes all
// the attributes contained in the given string map.
func (attributes Attributes) FromStringMap(strings map[string]string) Attributes {
	for key, value := range strings {
		attributes.PutString(key, value)
	}
	return attributes
}

// PutFloat allows adding a float attribute to the
// current map of attributes.
func (attributes Attributes) PutFloat(key string, value float64) Attributes {
	raw, _ := json.Marshal(value) // NOTE(review): json.Marshal fails on NaN/Inf; that error is ignored here
	attributes[key] = apiext.JSON{Raw: raw}
	return attributes
}

// FromFloatMap allows adding into the current map of attributes all
// the attributes contained in the given map of floats.
func (attributes Attributes) FromFloatMap(strings map[string]float64) Attributes {
	for key, value := range strings {
		attributes.PutFloat(key, value)
	}
	return attributes
}
// PutInteger allows adding an integer attribute to the
// current map of attributes.
func (attributes Attributes) PutInteger(key string, value int) Attributes {
	raw, _ := json.Marshal(value) // marshalling an int cannot fail
	attributes[key] = apiext.JSON{Raw: raw}
	return attributes
}
// FromIntegerMap allows adding into the current map of attributes all
// the attributes contained in the given map of integers.
func (attributes Attributes) FromIntegerMap(strings map[string]int) Attributes {
	for key, value := range strings {
		// Delegate to PutInteger for consistency with FromStringMap and
		// FromFloatMap, instead of duplicating the marshalling inline.
		attributes.PutInteger(key, value)
	}
	return attributes
}
// PutBoolean allows adding a boolean attribute to the
// current map of attributes.
func (attributes Attributes) PutBoolean(key string, value bool) Attributes {
	raw, _ := json.Marshal(value) // marshalling a bool cannot fail
	attributes[key] = apiext.JSON{Raw: raw}
	return attributes
}
// FromBooleanMap allows adding into the current map of attributes all
// the attributes contained in the given map of booleans.
func (attributes Attributes) FromBooleanMap(strings map[string]bool) Attributes {
	for key, value := range strings {
		// Delegate to PutBoolean for consistency with FromStringMap and
		// FromFloatMap, instead of duplicating the marshalling inline.
		attributes.PutBoolean(key, value)
	}
	return attributes
}
// Put allows adding an attribute to the
// current map of attributes.
// The attribute is provided as an interface, and can be any value
// that supports Json Marshaling.
//
// An optional error holder can be passed as an argument
// to receive any error that might have occurred during the attributes
// decoding.
func (attributes Attributes) Put(key string, value interface{}, errorHolder *error) Attributes {
	rawJSON, marshalErr := json.Marshal(value)
	if marshalErr != nil && errorHolder != nil {
		*errorHolder = marshalErr
	}
	attributes[key] = apiext.JSON{Raw: rawJSON}
	return attributes
}

// FromMap allows adding into the current map of attributes all
// the attributes contained in the given map of interfaces;
// each attribute of the given map is provided as an interface, and can be
// any value that supports Json Marshaling.
//
// An optional error holder can be passed as an argument
// to receive any error that might have occurred during the attributes
// decoding.
func (attributes Attributes) FromMap(strings map[string]interface{}, errorHolder *error) Attributes {
	// Only the last error is kept in the holder; this keeps the code
	// simple and avoids an external multi-error dependency.
	for key, value := range strings {
		attributes.Put(key, value, errorHolder)
	}
	return attributes
}
// FromInterface allows completing the map of attributes from the given interface.
// The given interface can be any value
// that supports Json Marshaling and will be marshalled as a JSON object.
//
// This is especially useful to create attributes from well-known, but
// implementation-dependent Go structures.
//
// An optional error holder can be passed as an argument
// to receive any error that might have occurred during the attributes
// decoding.
func (attributes Attributes) FromInterface(structure interface{}, errorHolder *error) Attributes {
	newAttributes := Attributes{}
	completeJSON, err := json.Marshal(structure)
	if err != nil {
		if errorHolder != nil {
			*errorHolder = err
		}
		// Nothing can be decoded from a failed marshal; leave the
		// attributes untouched (same observable result as before).
		return attributes
	}
	err = json.Unmarshal(completeJSON, &newAttributes)
	if err != nil && errorHolder != nil {
		// Fix: the original assigned this error to err but never
		// reported it, silently dropping unmarshal failures.
		*errorHolder = err
	}
	for key, value := range newAttributes {
		attributes[key] = value
	}
	return attributes
}

View File

@ -0,0 +1,12 @@
package attributes
import "fmt"
// KeyNotFoundError is returned when an attribute lookup references a key
// that is not present in the attributes map.
type KeyNotFoundError struct {
	// Key is the attribute key that could not be found.
	Key string
}

// Error implements the error interface.
func (e *KeyNotFoundError) Error() string {
	const format = "Attribute with key %q does not exist"
	return fmt.Sprintf(format, e.Key)
}

84
vendor/github.com/devfile/api/v2/pkg/devfile/header.go generated vendored Normal file
View File

@ -0,0 +1,84 @@
package devfile
import (
attributes "github.com/devfile/api/v2/pkg/attributes"
)
// DevfileHeader describes the structure of the devfile-specific top-level fields
// that are not part of the K8S API structures
type DevfileHeader struct {
	// Devfile schema version
	// +kubebuilder:validation:Pattern=^([2-9])\.([0-9]+)\.([0-9]+)(\-[0-9a-z-]+(\.[0-9a-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$
	SchemaVersion string `json:"schemaVersion"`
	// +kubebuilder:pruning:PreserveUnknownFields
	// +optional
	// Optional metadata
	Metadata DevfileMetadata `json:"metadata,omitempty"`
}

// Architecture describes the architecture type
// +kubebuilder:validation:Enum=amd64;arm64;ppc64le;s390x
type Architecture string

// Supported processor architecture values.
const (
	AMD64   Architecture = "amd64"
	ARM64   Architecture = "arm64"
	PPC64LE Architecture = "ppc64le"
	S390X   Architecture = "s390x"
)

// DevfileMetadata holds optional, mostly free-form metadata describing a devfile.
type DevfileMetadata struct {
	// Optional devfile name
	// +optional
	Name string `json:"name,omitempty"`
	// Optional semver-compatible version
	// +optional
	// +kubebuilder:validation:Pattern=^([0-9]+)\.([0-9]+)\.([0-9]+)(\-[0-9a-z-]+(\.[0-9a-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$
	Version string `json:"version,omitempty"`
	// Map of implementation-dependant free-form YAML attributes. Deprecated, use the top-level attributes field instead.
	// +optional
	// +kubebuilder:validation:Type=object
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Attributes attributes.Attributes `json:"attributes,omitempty"`
	// Optional devfile display name
	// +optional
	DisplayName string `json:"displayName,omitempty"`
	// Optional devfile description
	// +optional
	Description string `json:"description,omitempty"`
	// Optional devfile tags
	// +optional
	Tags []string `json:"tags,omitempty"`
	// Optional list of processor architectures that the devfile supports, empty list suggests that the devfile can be used on any architecture
	// +optional
	// +kubebuilder:validation:UniqueItems=true
	Architectures []Architecture `json:"architectures,omitempty"`
	// Optional devfile icon, can be a URI or a relative path in the project
	// +optional
	Icon string `json:"icon,omitempty"`
	// Optional devfile global memory limit
	// +optional
	GlobalMemoryLimit string `json:"globalMemoryLimit,omitempty"`
	// Optional devfile project type
	// +optional
	ProjectType string `json:"projectType,omitempty"`
	// Optional devfile language
	// +optional
	Language string `json:"language,omitempty"`
	// Optional devfile website
	// +optional
	Website string `json:"website,omitempty"`
}

277
vendor/github.com/devfile/devworkspace-operator/LICENSE generated vendored Normal file
View File

@ -0,0 +1,277 @@
Eclipse Public License - v 2.0
THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE
PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
1. DEFINITIONS
"Contribution" means:
a) in the case of the initial Contributor, the initial content
Distributed under this Agreement, and
b) in the case of each subsequent Contributor:
i) changes to the Program, and
ii) additions to the Program;
where such changes and/or additions to the Program originate from
and are Distributed by that particular Contributor. A Contribution
"originates" from a Contributor if it was added to the Program by
such Contributor itself or anyone acting on such Contributor's behalf.
Contributions do not include changes or additions to the Program that
are not Modified Works.
"Contributor" means any person or entity that Distributes the Program.
"Licensed Patents" mean patent claims licensable by a Contributor which
are necessarily infringed by the use or sale of its Contribution alone
or when combined with the Program.
"Program" means the Contributions Distributed in accordance with this
Agreement.
"Recipient" means anyone who receives the Program under this Agreement
or any Secondary License (as applicable), including Contributors.
"Derivative Works" shall mean any work, whether in Source Code or other
form, that is based on (or derived from) the Program and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship.
"Modified Works" shall mean any work in Source Code or other form that
results from an addition to, deletion from, or modification of the
contents of the Program, including, for purposes of clarity any new file
in Source Code form that contains any contents of the Program. Modified
Works shall not include works that contain only declarations,
interfaces, types, classes, structures, or files of the Program solely
in each case in order to link to, bind by name, or subclass the Program
or Modified Works thereof.
"Distribute" means the acts of a) distributing or b) making available
in any manner that enables the transfer of a copy.
"Source Code" means the form of a Program preferred for making
modifications, including but not limited to software source code,
documentation source, and configuration files.
"Secondary License" means either the GNU General Public License,
Version 2.0, or any later versions of that license, including any
exceptions or additional permissions as identified by the initial
Contributor.
2. GRANT OF RIGHTS
a) Subject to the terms of this Agreement, each Contributor hereby
grants Recipient a non-exclusive, worldwide, royalty-free copyright
license to reproduce, prepare Derivative Works of, publicly display,
publicly perform, Distribute and sublicense the Contribution of such
Contributor, if any, and such Derivative Works.
b) Subject to the terms of this Agreement, each Contributor hereby
grants Recipient a non-exclusive, worldwide, royalty-free patent
license under Licensed Patents to make, use, sell, offer to sell,
import and otherwise transfer the Contribution of such Contributor,
if any, in Source Code or other form. This patent license shall
apply to the combination of the Contribution and the Program if, at
the time the Contribution is added by the Contributor, such addition
of the Contribution causes such combination to be covered by the
Licensed Patents. The patent license shall not apply to any other
combinations which include the Contribution. No hardware per se is
licensed hereunder.
c) Recipient understands that although each Contributor grants the
licenses to its Contributions set forth herein, no assurances are
provided by any Contributor that the Program does not infringe the
patent or other intellectual property rights of any other entity.
Each Contributor disclaims any liability to Recipient for claims
brought by any other entity based on infringement of intellectual
property rights or otherwise. As a condition to exercising the
rights and licenses granted hereunder, each Recipient hereby
assumes sole responsibility to secure any other intellectual
property rights needed, if any. For example, if a third party
patent license is required to allow Recipient to Distribute the
Program, it is Recipient's responsibility to acquire that license
before distributing the Program.
d) Each Contributor represents that to its knowledge it has
sufficient copyright rights in its Contribution, if any, to grant
the copyright license set forth in this Agreement.
e) Notwithstanding the terms of any Secondary License, no
Contributor makes additional grants to any Recipient (other than
those set forth in this Agreement) as a result of such Recipient's
receipt of the Program under the terms of a Secondary License
(if permitted under the terms of Section 3).
3. REQUIREMENTS
3.1 If a Contributor Distributes the Program in any form, then:
a) the Program must also be made available as Source Code, in
accordance with section 3.2, and the Contributor must accompany
the Program with a statement that the Source Code for the Program
is available under this Agreement, and informs Recipients how to
obtain it in a reasonable manner on or through a medium customarily
used for software exchange; and
b) the Contributor may Distribute the Program under a license
different than this Agreement, provided that such license:
i) effectively disclaims on behalf of all other Contributors all
warranties and conditions, express and implied, including
warranties or conditions of title and non-infringement, and
implied warranties or conditions of merchantability and fitness
for a particular purpose;
ii) effectively excludes on behalf of all other Contributors all
liability for damages, including direct, indirect, special,
incidental and consequential damages, such as lost profits;
iii) does not attempt to limit or alter the recipients' rights
in the Source Code under section 3.2; and
iv) requires any subsequent distribution of the Program by any
party to be under a license that satisfies the requirements
of this section 3.
3.2 When the Program is Distributed as Source Code:
a) it must be made available under this Agreement, or if the
Program (i) is combined with other material in a separate file or
files made available under a Secondary License, and (ii) the initial
Contributor attached to the Source Code the notice described in
Exhibit A of this Agreement, then the Program may be made available
under the terms of such Secondary Licenses, and
b) a copy of this Agreement must be included with each copy of
the Program.
3.3 Contributors may not remove or alter any copyright, patent,
trademark, attribution notices, disclaimers of warranty, or limitations
of liability ("notices") contained within the Program from any copy of
the Program which they Distribute, provided that Contributors may add
their own appropriate notices.
4. COMMERCIAL DISTRIBUTION
Commercial distributors of software may accept certain responsibilities
with respect to end users, business partners and the like. While this
license is intended to facilitate the commercial use of the Program,
the Contributor who includes the Program in a commercial product
offering should do so in a manner which does not create potential
liability for other Contributors. Therefore, if a Contributor includes
the Program in a commercial product offering, such Contributor
("Commercial Contributor") hereby agrees to defend and indemnify every
other Contributor ("Indemnified Contributor") against any losses,
damages and costs (collectively "Losses") arising from claims, lawsuits
and other legal actions brought by a third party against the Indemnified
Contributor to the extent caused by the acts or omissions of such
Commercial Contributor in connection with its distribution of the Program
in a commercial product offering. The obligations in this section do not
apply to any claims or Losses relating to any actual or alleged
intellectual property infringement. In order to qualify, an Indemnified
Contributor must: a) promptly notify the Commercial Contributor in
writing of such claim, and b) allow the Commercial Contributor to control,
and cooperate with the Commercial Contributor in, the defense and any
related settlement negotiations. The Indemnified Contributor may
participate in any such claim at its own expense.
For example, a Contributor might include the Program in a commercial
product offering, Product X. That Contributor is then a Commercial
Contributor. If that Commercial Contributor then makes performance
claims, or offers warranties related to Product X, those performance
claims and warranties are such Commercial Contributor's responsibility
alone. Under this section, the Commercial Contributor would have to
defend claims against the other Contributors related to those performance
claims and warranties, and if a court requires any other Contributor to
pay any damages as a result, the Commercial Contributor must pay
those damages.
5. NO WARRANTY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT
PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS"
BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR
IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF
TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR
PURPOSE. Each Recipient is solely responsible for determining the
appropriateness of using and distributing the Program and assumes all
risks associated with its exercise of rights under this Agreement,
including but not limited to the risks and costs of program errors,
compliance with applicable laws, damage to or loss of data, programs
or equipment, and unavailability or interruption of operations.
6. DISCLAIMER OF LIABILITY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT
PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS
SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST
PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE
EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
7. GENERAL
If any provision of this Agreement is invalid or unenforceable under
applicable law, it shall not affect the validity or enforceability of
the remainder of the terms of this Agreement, and without further
action by the parties hereto, such provision shall be reformed to the
minimum extent necessary to make such provision valid and enforceable.
If Recipient institutes patent litigation against any entity
(including a cross-claim or counterclaim in a lawsuit) alleging that the
Program itself (excluding combinations of the Program with other software
or hardware) infringes such Recipient's patent(s), then such Recipient's
rights granted under Section 2(b) shall terminate as of the date such
litigation is filed.
All Recipient's rights under this Agreement shall terminate if it
fails to comply with any of the material terms or conditions of this
Agreement and does not cure such failure in a reasonable period of
time after becoming aware of such noncompliance. If all Recipient's
rights under this Agreement terminate, Recipient agrees to cease use
and distribution of the Program as soon as reasonably practicable.
However, Recipient's obligations under this Agreement and any licenses
granted by Recipient relating to the Program shall continue and survive.
Everyone is permitted to copy and distribute copies of this Agreement,
but in order to avoid inconsistency the Agreement is copyrighted and
may only be modified in the following manner. The Agreement Steward
reserves the right to publish new versions (including revisions) of
this Agreement from time to time. No one other than the Agreement
Steward has the right to modify this Agreement. The Eclipse Foundation
is the initial Agreement Steward. The Eclipse Foundation may assign the
responsibility to serve as the Agreement Steward to a suitable separate
entity. Each new version of the Agreement will be given a distinguishing
version number. The Program (including Contributions) may always be
Distributed subject to the version of the Agreement under which it was
received. In addition, after a new version of the Agreement is published,
Contributor may elect to Distribute the Program (including its
Contributions) under the new version.
Except as expressly stated in Sections 2(a) and 2(b) above, Recipient
receives no rights or licenses to the intellectual property of any
Contributor under this Agreement, whether expressly, by implication,
estoppel or otherwise. All rights in the Program not expressly granted
under this Agreement are reserved. Nothing in this Agreement is intended
to be enforceable by any entity that is not a Contributor or Recipient.
No third-party beneficiary rights are created under this Agreement.
Exhibit A - Form of Secondary Licenses Notice
"This Source Code may also be made available under the following
Secondary Licenses when the conditions for such availability set forth
in the Eclipse Public License, v. 2.0 are satisfied: {name license(s),
version(s), and exceptions or additional permissions here}."
Simply including a copy of this Agreement, including this Exhibit A
is not sufficient to license the Source Code under Secondary Licenses.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to
look for such a notice.
You may add additional accurate notices of copyright ownership.

View File

@ -0,0 +1,59 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package v1alpha1
import v1 "k8s.io/api/core/v1"
// PodAdditions is a summary of additions that are to be merged into the main
// devworkspace deployment (containers, volumes, metadata, and pull secrets
// contributed by routing/solver components).
//
// NOTE(review): +patchMergeKey=name is also attached to the map-typed fields
// (Annotations, Labels, ServiceAccountAnnotations); patch merge keys normally
// apply to lists only — confirm this is intentional against controller-gen output.
type PodAdditions struct {
	// Annotations to be applied to devworkspace deployment
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Annotations map[string]string `json:"annotations,omitempty"`

	// Labels to be applied to devworkspace deployment
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Labels map[string]string `json:"labels,omitempty"`

	// Containers to add to devworkspace deployment
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Containers []v1.Container `json:"containers,omitempty"`

	// Init containers to add to devworkspace deployment
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	InitContainers []v1.Container `json:"initContainers,omitempty"`

	// Volumes to add to devworkspace deployment
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Volumes []v1.Volume `json:"volumes,omitempty"`

	// VolumeMounts to add to all containers in a devworkspace deployment
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"`

	// ImagePullSecrets to add to devworkspace deployment
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	PullSecrets []v1.LocalObjectReference `json:"pullSecrets,omitempty"`

	// Annotations for the devworkspace service account, it might be used for e.g. OpenShift oauth with SA as auth client
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	ServiceAccountAnnotations map[string]string `json:"serviceAccountAnnotations,omitempty"`
}

View File

@ -0,0 +1,31 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package v1alpha1
// EndpointAttribute is the name of an attribute that can be attached to a
// devfile endpoint.
type EndpointAttribute string

// EndpointType is a value of the "type" endpoint attribute (see
// TypeEndpointAttribute below).
type EndpointType string

const (
	// TypeEndpointAttribute is an attribute used for devfile endpoints that specifies the endpoint type.
	// See EndpointType for respected values
	TypeEndpointAttribute EndpointAttribute = "type"

	// MainEndpointType is the value for the `type` endpoint attribute that indicates that the endpoint
	// should be exposed as mainUrl in the workspace status
	MainEndpointType EndpointType = "main"

	// DiscoverableAttribute defines an endpoint as "discoverable", meaning that a service should be
	// created using the endpoint name (i.e. instead of generating a service name for all endpoints,
	// this endpoint should be statically accessible)
	DiscoverableAttribute EndpointAttribute = "discoverable"
)

View File

@ -0,0 +1,107 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package v1alpha1
import (
dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
devfileAttr "github.com/devfile/api/v2/pkg/attributes"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// DevWorkspaceRoutingSpec defines the desired state of DevWorkspaceRouting
// +k8s:openapi-gen=true
type DevWorkspaceRoutingSpec struct {
	// Id for the DevWorkspace being routed
	DevWorkspaceId string `json:"devworkspaceId"`

	// Class of the routing: this drives which DevWorkspaceRouting controller will manage this routing
	RoutingClass DevWorkspaceRoutingClass `json:"routingClass,omitempty"`

	// Machines to endpoints map
	Endpoints map[string]EndpointList `json:"endpoints"`

	// Selector that should be used by created services to point to the devworkspace Pod
	PodSelector map[string]string `json:"podSelector"`
}

// DevWorkspaceRoutingClass selects which routing controller/solver manages a
// DevWorkspaceRouting instance (matched via SolverGetter in the reconciler).
type DevWorkspaceRoutingClass string

// Known routing classes. How each class exposes endpoints is implemented by
// the corresponding solver, which is not visible in this file.
const (
	DevWorkspaceRoutingBasic       DevWorkspaceRoutingClass = "basic"
	DevWorkspaceRoutingCluster     DevWorkspaceRoutingClass = "cluster"
	DevWorkspaceRoutingClusterTLS  DevWorkspaceRoutingClass = "cluster-tls"
	DevWorkspaceRoutingWebTerminal DevWorkspaceRoutingClass = "web-terminal"
)
// DevWorkspaceRoutingStatus defines the observed state of DevWorkspaceRouting
// +k8s:openapi-gen=true
type DevWorkspaceRoutingStatus struct {
	// Additions to main devworkspace deployment
	PodAdditions *PodAdditions `json:"podAdditions,omitempty"`

	// Machine name to exposed endpoint map
	ExposedEndpoints map[string]ExposedEndpointList `json:"exposedEndpoints,omitempty"`

	// Routing reconcile phase
	Phase DevWorkspaceRoutingPhase `json:"phase,omitempty"`

	// Message is a user-readable message explaining the current phase (e.g. reason for failure)
	Message string `json:"message,omitempty"`
}

// DevWorkspaceRoutingPhase enumerates the valid phases for a devworkspacerouting.
type DevWorkspaceRoutingPhase string

const (
	// RoutingReady means networking objects for the routing have been provisioned.
	RoutingReady DevWorkspaceRoutingPhase = "Ready"
	// RoutingPreparing means the routing is still being reconciled.
	RoutingPreparing DevWorkspaceRoutingPhase = "Preparing"
	// RoutingFailed is terminal: the reconciler stops processing a routing in
	// this phase (see the early return in Reconcile).
	RoutingFailed DevWorkspaceRoutingPhase = "Failed"
)
// ExposedEndpoint describes a single endpoint that has been made reachable,
// pairing the endpoint name with its public URL.
type ExposedEndpoint struct {
	// Name of the exposed endpoint
	Name string `json:"name"`

	// Public URL of the exposed endpoint
	Url string `json:"url"`

	// Attributes of the exposed endpoint
	// +optional
	Attributes devfileAttr.Attributes `json:"attributes,omitempty"`
}

// EndpointList is a list of devfile endpoints.
type EndpointList []dw.Endpoint

// ExposedEndpointList is a list of endpoints that have been exposed.
type ExposedEndpointList []ExposedEndpoint
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DevWorkspaceRouting is the Schema for the devworkspaceroutings API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=devworkspaceroutings,scope=Namespaced,shortName=dwr
// +kubebuilder:printcolumn:name="DevWorkspace ID",type="string",JSONPath=".spec.devworkspaceId",description="The owner DevWorkspace's unique id"
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="The current phase"
// +kubebuilder:printcolumn:name="Info",type="string",JSONPath=".status.message",description="Additional info about DevWorkspaceRouting state"
type DevWorkspaceRouting struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the desired routing configuration.
	Spec DevWorkspaceRoutingSpec `json:"spec,omitempty"`
	// Status is the most recently observed routing state.
	Status DevWorkspaceRoutingStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DevWorkspaceRoutingList contains a list of DevWorkspaceRouting
type DevWorkspaceRoutingList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []DevWorkspaceRouting `json:"items"`
}

// init registers both CRD types with SchemeBuilder so that AddToScheme
// (declared in groupversion_info.go) makes them available to clients.
func init() {
	SchemeBuilder.Register(&DevWorkspaceRouting{}, &DevWorkspaceRoutingList{})
}

View File

@ -0,0 +1,16 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
// Package v1alpha1 contains API Schema definitions for the controller v1alpha1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=controller.devfile.io
package v1alpha1

View File

@ -0,0 +1,32 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
// Package v1alpha1 contains API Schema definitions for the controller v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=controller.devfile.io
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
	// GroupVersion is group version used to register these objects
	GroupVersion = schema.GroupVersion{Group: "controller.devfile.io", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	// (types register themselves via SchemeBuilder.Register in their init funcs).
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

View File

@ -0,0 +1,292 @@
// +build !ignore_autogenerated
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
"github.com/devfile/api/v2/pkg/attributes"
v1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// NOTE: the deepcopy functions below are generated by controller-gen (see the
// "DO NOT EDIT" file header); regenerate them instead of hand-editing.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspaceRouting) DeepCopyInto(out *DevWorkspaceRouting) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceRouting.
func (in *DevWorkspaceRouting) DeepCopy() *DevWorkspaceRouting {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceRouting)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DevWorkspaceRouting) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspaceRoutingList) DeepCopyInto(out *DevWorkspaceRoutingList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]DevWorkspaceRouting, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceRoutingList.
func (in *DevWorkspaceRoutingList) DeepCopy() *DevWorkspaceRoutingList {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceRoutingList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DevWorkspaceRoutingList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// NOTE: generated by controller-gen (see the "DO NOT EDIT" file header);
// regenerate rather than hand-editing.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspaceRoutingSpec) DeepCopyInto(out *DevWorkspaceRoutingSpec) {
	*out = *in
	if in.Endpoints != nil {
		in, out := &in.Endpoints, &out.Endpoints
		*out = make(map[string]EndpointList, len(*in))
		for key, val := range *in {
			var outVal []v1alpha2.Endpoint
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = make(EndpointList, len(*in))
				for i := range *in {
					(*in)[i].DeepCopyInto(&(*out)[i])
				}
			}
			(*out)[key] = outVal
		}
	}
	if in.PodSelector != nil {
		in, out := &in.PodSelector, &out.PodSelector
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceRoutingSpec.
func (in *DevWorkspaceRoutingSpec) DeepCopy() *DevWorkspaceRoutingSpec {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceRoutingSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspaceRoutingStatus) DeepCopyInto(out *DevWorkspaceRoutingStatus) {
	*out = *in
	if in.PodAdditions != nil {
		in, out := &in.PodAdditions, &out.PodAdditions
		*out = new(PodAdditions)
		(*in).DeepCopyInto(*out)
	}
	if in.ExposedEndpoints != nil {
		in, out := &in.ExposedEndpoints, &out.ExposedEndpoints
		*out = make(map[string]ExposedEndpointList, len(*in))
		for key, val := range *in {
			var outVal []ExposedEndpoint
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = make(ExposedEndpointList, len(*in))
				for i := range *in {
					(*in)[i].DeepCopyInto(&(*out)[i])
				}
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceRoutingStatus.
func (in *DevWorkspaceRoutingStatus) DeepCopy() *DevWorkspaceRoutingStatus {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceRoutingStatus)
	in.DeepCopyInto(out)
	return out
}
// NOTE: generated by controller-gen (see the "DO NOT EDIT" file header);
// regenerate rather than hand-editing.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in EndpointList) DeepCopyInto(out *EndpointList) {
	{
		in := &in
		*out = make(EndpointList, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointList.
func (in EndpointList) DeepCopy() EndpointList {
	if in == nil {
		return nil
	}
	out := new(EndpointList)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExposedEndpoint) DeepCopyInto(out *ExposedEndpoint) {
	*out = *in
	if in.Attributes != nil {
		in, out := &in.Attributes, &out.Attributes
		*out = make(attributes.Attributes, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExposedEndpoint.
func (in *ExposedEndpoint) DeepCopy() *ExposedEndpoint {
	if in == nil {
		return nil
	}
	out := new(ExposedEndpoint)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ExposedEndpointList) DeepCopyInto(out *ExposedEndpointList) {
	{
		in := &in
		*out = make(ExposedEndpointList, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExposedEndpointList.
func (in ExposedEndpointList) DeepCopy() ExposedEndpointList {
	if in == nil {
		return nil
	}
	out := new(ExposedEndpointList)
	in.DeepCopyInto(out)
	return *out
}
// NOTE: generated by controller-gen (see the "DO NOT EDIT" file header);
// regenerate rather than hand-editing.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodAdditions) DeepCopyInto(out *PodAdditions) {
	*out = *in
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Containers != nil {
		in, out := &in.Containers, &out.Containers
		*out = make([]v1.Container, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.InitContainers != nil {
		in, out := &in.InitContainers, &out.InitContainers
		*out = make([]v1.Container, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make([]v1.Volume, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.VolumeMounts != nil {
		in, out := &in.VolumeMounts, &out.VolumeMounts
		*out = make([]v1.VolumeMount, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.PullSecrets != nil {
		in, out := &in.PullSecrets, &out.PullSecrets
		*out = make([]v1.LocalObjectReference, len(*in))
		copy(*out, *in)
	}
	if in.ServiceAccountAnnotations != nil {
		in, out := &in.ServiceAccountAnnotations, &out.ServiceAccountAnnotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAdditions.
func (in *PodAdditions) DeepCopy() *PodAdditions {
	if in == nil {
		return nil
	}
	out := new(PodAdditions)
	in.DeepCopyInto(out)
	return out
}

View File

@ -0,0 +1,329 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package devworkspacerouting
import (
"context"
"errors"
"fmt"
"time"
"github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers"
maputils "github.com/devfile/devworkspace-operator/internal/map"
"github.com/devfile/devworkspace-operator/pkg/config"
"github.com/devfile/devworkspace-operator/pkg/constants"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
"github.com/go-logr/logr"
"github.com/google/go-cmp/cmp"
routeV1 "github.com/openshift/api/route/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
)
var (
	// NoSolversEnabled is returned by a SolverGetter that has no solver for the
	// requested routing class.
	// NOTE(review): Go convention would name this ErrNoSolversEnabled; renaming
	// an exported variable would break external callers, so it is left as-is.
	NoSolversEnabled = errors.New("reconciler does not define SolverGetter")
)

// devWorkspaceRoutingFinalizer is added to DevWorkspaceRouting CRs so that the
// solver can clean up external state before deletion (see the
// setFinalizer/finalize calls in Reconcile).
const devWorkspaceRoutingFinalizer = "devworkspacerouting.controller.devfile.io"
// DevWorkspaceRoutingReconciler reconciles a DevWorkspaceRouting object.
// The embedded client.Client is used for all API reads/writes; Log and Scheme
// are presumably injected at manager setup time (not visible here — confirm
// against the controller registration code).
type DevWorkspaceRoutingReconciler struct {
	client.Client
	Log    logr.Logger
	Scheme *runtime.Scheme

	// SolverGetter will be used to get solvers for a particular devWorkspaceRouting
	SolverGetter solvers.RoutingSolverGetter
}
// +kubebuilder:rbac:groups=controller.devfile.io,resources=devworkspaceroutings,verbs=*
// +kubebuilder:rbac:groups=controller.devfile.io,resources=devworkspaceroutings/status,verbs=get;update;patch
// +kubebuilder:rbac:groups="",resources=services,verbs=*
// +kubebuilder:rbac:groups=extensions,resources=ingresses,verbs=*
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=*
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes/custom-host,verbs=create
func (r *DevWorkspaceRoutingReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name)
// Fetch the DevWorkspaceRouting instance
instance := &controllerv1alpha1.DevWorkspaceRouting{}
err := r.Get(ctx, req.NamespacedName, instance)
if err != nil {
if k8sErrors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
reqLogger = reqLogger.WithValues(constants.DevWorkspaceIDLoggerKey, instance.Spec.DevWorkspaceId)
reqLogger.Info("Reconciling DevWorkspaceRouting")
if instance.Spec.RoutingClass == "" {
return reconcile.Result{}, r.markRoutingFailed(instance, "DevWorkspaceRouting requires field routingClass to be set")
}
solver, err := r.SolverGetter.GetSolver(r.Client, instance.Spec.RoutingClass)
if err != nil {
if errors.Is(err, solvers.RoutingNotSupported) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, r.markRoutingFailed(instance, fmt.Sprintf("Invalid routingClass for DevWorkspace: %s", err))
}
// Check if the DevWorkspaceRouting instance is marked to be deleted, which is
// indicated by the deletion timestamp being set.
if instance.GetDeletionTimestamp() != nil {
reqLogger.Info("Finalizing DevWorkspaceRouting")
return reconcile.Result{}, r.finalize(solver, instance)
}
if instance.Status.Phase == controllerv1alpha1.RoutingFailed {
return reconcile.Result{}, nil
}
// Add finalizer for this CR if not already present
if err := r.setFinalizer(reqLogger, solver, instance); err != nil {
return reconcile.Result{}, err
}
workspaceMeta := solvers.DevWorkspaceMetadata{
DevWorkspaceId: instance.Spec.DevWorkspaceId,
Namespace: instance.Namespace,
PodSelector: instance.Spec.PodSelector,
}
restrictedAccess, setRestrictedAccess := instance.Annotations[constants.DevWorkspaceRestrictedAccessAnnotation]
routingObjects, err := solver.GetSpecObjects(instance, workspaceMeta)
if err != nil {
var notReady *solvers.RoutingNotReady
if errors.As(err, &notReady) {
duration := notReady.Retry
if duration.Milliseconds() == 0 {
duration = 1 * time.Second
}
reqLogger.Info("controller not ready for devworkspace routing. Retrying", "DelayMs", duration.Milliseconds())
return reconcile.Result{RequeueAfter: duration}, r.reconcileStatus(instance, nil, nil, false, "Waiting for DevWorkspaceRouting controller to be ready")
}
var invalid *solvers.RoutingInvalid
if errors.As(err, &invalid) {
reqLogger.Error(invalid, "routing controller considers routing invalid")
return reconcile.Result{}, r.markRoutingFailed(instance, fmt.Sprintf("Unable to provision networking for DevWorkspace: %s", invalid))
}
// generic error, just fail the reconciliation
return reconcile.Result{}, err
}
services := routingObjects.Services
for idx := range services {
err := controllerutil.SetControllerReference(instance, &services[idx], r.Scheme)
if err != nil {
return reconcile.Result{}, err
}
if setRestrictedAccess {
services[idx].Annotations = maputils.Append(services[idx].Annotations, constants.DevWorkspaceRestrictedAccessAnnotation, restrictedAccess)
}
}
ingresses := routingObjects.Ingresses
for idx := range ingresses {
err := controllerutil.SetControllerReference(instance, &ingresses[idx], r.Scheme)
if err != nil {
return reconcile.Result{}, err
}
if setRestrictedAccess {
ingresses[idx].Annotations = maputils.Append(ingresses[idx].Annotations, constants.DevWorkspaceRestrictedAccessAnnotation, restrictedAccess)
}
}
routes := routingObjects.Routes
for idx := range routes {
err := controllerutil.SetControllerReference(instance, &routes[idx], r.Scheme)
if err != nil {
return reconcile.Result{}, err
}
if setRestrictedAccess {
routes[idx].Annotations = maputils.Append(routes[idx].Annotations, constants.DevWorkspaceRestrictedAccessAnnotation, restrictedAccess)
}
}
servicesInSync, clusterServices, err := r.syncServices(instance, services)
if err != nil {
reqLogger.Error(err, "Error syncing services")
return reconcile.Result{Requeue: true}, r.reconcileStatus(instance, nil, nil, false, "Preparing services")
} else if !servicesInSync {
reqLogger.Info("Services not in sync")
return reconcile.Result{Requeue: true}, r.reconcileStatus(instance, nil, nil, false, "Preparing services")
}
clusterRoutingObj := solvers.RoutingObjects{
Services: clusterServices,
}
if infrastructure.IsOpenShift() {
routesInSync, clusterRoutes, err := r.syncRoutes(instance, routes)
if err != nil {
reqLogger.Error(err, "Error syncing routes")
return reconcile.Result{Requeue: true}, r.reconcileStatus(instance, nil, nil, false, "Preparing routes")
} else if !routesInSync {
reqLogger.Info("Routes not in sync")
return reconcile.Result{Requeue: true}, r.reconcileStatus(instance, nil, nil, false, "Preparing routes")
}
clusterRoutingObj.Routes = clusterRoutes
} else {
ingressesInSync, clusterIngresses, err := r.syncIngresses(instance, ingresses)
if err != nil {
reqLogger.Error(err, "Error syncing ingresses")
return reconcile.Result{Requeue: true}, r.reconcileStatus(instance, nil, nil, false, "Preparing ingresses")
} else if !ingressesInSync {
reqLogger.Info("Ingresses not in sync")
return reconcile.Result{Requeue: true}, r.reconcileStatus(instance, nil, nil, false, "Preparing ingresses")
}
clusterRoutingObj.Ingresses = clusterIngresses
}
exposedEndpoints, endpointsAreReady, err := solver.GetExposedEndpoints(instance.Spec.Endpoints, clusterRoutingObj)
if err != nil {
reqLogger.Error(err, "Could not get exposed endpoints for devworkspace")
return reconcile.Result{}, r.markRoutingFailed(instance, fmt.Sprintf("Could not get exposed endpoints for DevWorkspace: %s", err))
}
return reconcile.Result{}, r.reconcileStatus(instance, &routingObjects, exposedEndpoints, endpointsAreReady, "")
}
// setFinalizer adds the devWorkspaceRouting finalizer to the routing object
// and persists it. It is a no-op when the solver needs no finalizer or when
// the finalizer is already present.
func (r *DevWorkspaceRoutingReconciler) setFinalizer(reqLogger logr.Logger, solver solvers.RoutingSolver, m *controllerv1alpha1.DevWorkspaceRouting) error {
	needsFinalizer := solver.FinalizerRequired(m) && !contains(m.GetFinalizers(), devWorkspaceRoutingFinalizer)
	if !needsFinalizer {
		return nil
	}

	reqLogger.Info("Adding Finalizer for the DevWorkspaceRouting")
	m.SetFinalizers(append(m.GetFinalizers(), devWorkspaceRoutingFinalizer))

	// Persist the new finalizer list on the cluster.
	if err := r.Update(context.TODO(), m); err != nil {
		reqLogger.Error(err, "Failed to update DevWorkspaceRouting with finalizer")
		return err
	}
	return nil
}
// finalize runs the solver's custom finalization logic and then drops this
// controller's finalizer from the routing so the API server can delete it.
// Routings that do not carry our finalizer are left untouched.
func (r *DevWorkspaceRoutingReconciler) finalize(solver solvers.RoutingSolver, instance *controllerv1alpha1.DevWorkspaceRouting) error {
	if !contains(instance.GetFinalizers(), devWorkspaceRoutingFinalizer) {
		return nil
	}

	// Give the solver a chance to clean up its own resources first.
	if err := solver.Finalize(instance); err != nil {
		return err
	}

	// Once every finalizer has been removed, the object will be deleted.
	instance.SetFinalizers(remove(instance.GetFinalizers(), devWorkspaceRoutingFinalizer))
	return r.Update(context.TODO(), instance)
}
// markRoutingFailed sets the routing's phase to RoutingFailed with the given
// user-facing message and persists it to the status subresource. A failed
// routing is terminal: Reconcile returns immediately on RoutingFailed.
func (r *DevWorkspaceRoutingReconciler) markRoutingFailed(instance *controllerv1alpha1.DevWorkspaceRouting, message string) error {
	instance.Status.Message = message
	instance.Status.Phase = controllerv1alpha1.RoutingFailed
	return r.Status().Update(context.TODO(), instance)
}
// reconcileStatus updates the routing's status subresource to reflect the
// current provisioning state.
//
// When endpointsReady is false, the phase is set to RoutingPreparing with the
// given progress message. Otherwise the phase becomes RoutingReady and the
// pod additions and exposed endpoints are recorded; the write is skipped when
// the status already matches, to avoid needless API calls. routingObjects
// must be non-nil whenever endpointsReady is true, since PodAdditions is read
// from it.
func (r *DevWorkspaceRoutingReconciler) reconcileStatus(
	instance *controllerv1alpha1.DevWorkspaceRouting,
	routingObjects *solvers.RoutingObjects,
	exposedEndpoints map[string]controllerv1alpha1.ExposedEndpointList,
	endpointsReady bool,
	message string) error {
	if !endpointsReady {
		instance.Status.Phase = controllerv1alpha1.RoutingPreparing
		instance.Status.Message = message
		return r.Status().Update(context.TODO(), instance)
	}
	// No-op when the status already reflects the desired ready state.
	if instance.Status.Phase == controllerv1alpha1.RoutingReady &&
		cmp.Equal(instance.Status.PodAdditions, routingObjects.PodAdditions) &&
		cmp.Equal(instance.Status.ExposedEndpoints, exposedEndpoints) {
		return nil
	}
	instance.Status.Phase = controllerv1alpha1.RoutingReady
	instance.Status.Message = "DevWorkspaceRouting prepared"
	instance.Status.PodAdditions = routingObjects.PodAdditions
	instance.Status.ExposedEndpoints = exposedEndpoints
	return r.Status().Update(context.TODO(), instance)
}
// contains reports whether s is an element of list.
func contains(list []string, s string) bool {
	for i := 0; i < len(list); i++ {
		if list[i] == s {
			return true
		}
	}
	return false
}
// remove returns list with every occurrence of s removed.
//
// The previous implementation deleted elements while ranging over the
// original slice header; after a deletion shifted elements left, the next
// range index skipped one element, so adjacent duplicates of s could survive.
// This filter-in-place form handles duplicates correctly and keeps the
// original's in-place mutation of the backing array.
func remove(list []string, s string) []string {
	out := list[:0]
	for _, v := range list {
		if v != s {
			out = append(out, v)
		}
	}
	return out
}
// SetupWithManager registers the DevWorkspaceRouting controller with the
// manager: it reconciles DevWorkspaceRouting objects, watches owned Services
// and Ingresses (plus Routes on OpenShift), lets the configured solver getter
// add its own watches, and filters events down to supported routing classes.
func (r *DevWorkspaceRoutingReconciler) SetupWithManager(mgr ctrl.Manager) error {
	maxConcurrentReconciles, err := config.GetMaxConcurrentReconciles()
	if err != nil {
		return err
	}

	b := ctrl.NewControllerManagedBy(mgr).
		WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
		For(&controllerv1alpha1.DevWorkspaceRouting{}).
		Owns(&corev1.Service{}).
		Owns(&v1beta1.Ingress{})
	if infrastructure.IsOpenShift() {
		// Routes only exist on OpenShift clusters.
		b = b.Owns(&routeV1.Route{})
	}

	if r.SolverGetter == nil {
		return NoSolversEnabled
	}
	if err := r.SolverGetter.SetupControllerManager(b); err != nil {
		return err
	}

	return b.WithEventFilter(getRoutingPredicatesForSolverFunc(r.SolverGetter)).Complete(r)
}

View File

@ -0,0 +1,69 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package devworkspacerouting
import (
"github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers"
controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
// getRoutingPredicatesForSolverFunc builds event predicates that drop
// DevWorkspaceRouting events whose routingClass the given solverGetter does
// not handle. Events for any other object kind (services/ingresses/routes
// related to a workspace) always trigger reconciliation, as do all deletes.
func getRoutingPredicatesForSolverFunc(solverGetter solvers.RoutingSolverGetter) predicate.Funcs {
	isHandled := func(obj interface{}) bool {
		routing, ok := obj.(*controllerv1alpha1.DevWorkspaceRouting)
		if !ok {
			// Not a DevWorkspaceRouting, so it must be a service/ingress/route
			// related to the workspace. The safe choice is to reconcile so all
			// resources stay in sync; the controller itself ignores
			// DevWorkspaceRoutings of foreign routing classes.
			return true
		}
		return solverGetter.HasSolver(routing.Spec.RoutingClass)
	}
	return predicate.Funcs{
		CreateFunc: func(ev event.CreateEvent) bool {
			return isHandled(ev.Object)
		},
		DeleteFunc: func(_ event.DeleteEvent) bool {
			// Always reconcile deletes so objects are recreated if needed and
			// finalizers are removed on deletion.
			return true
		},
		UpdateFunc: func(ev event.UpdateEvent) bool {
			// Future improvement: handle the case where the old object has a
			// supported routingClass and the new one does not, to allow
			// cleanup when routingClass is switched.
			return isHandled(ev.ObjectNew)
		},
		GenericFunc: func(ev event.GenericEvent) bool {
			return isHandled(ev.Object)
		},
	}
}

View File

@ -0,0 +1,81 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package solvers
import (
"errors"
controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
"github.com/devfile/devworkspace-operator/pkg/config"
"github.com/devfile/devworkspace-operator/pkg/constants"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
)
// routeAnnotations returns the annotations applied to OpenShift Routes
// created for an endpoint: an haproxy rewrite to "/" plus the endpoint-name
// annotation used to match a Route back to its endpoint.
var routeAnnotations = func(endpointName string) map[string]string {
	return map[string]string{
		"haproxy.router.openshift.io/rewrite-target": "/",
		constants.DevWorkspaceEndpointNameAnnotation: endpointName,
	}
}
// nginxIngressAnnotations returns the annotations applied to Kubernetes
// Ingresses created for an endpoint: nginx class, rewrite to "/", disabled
// ssl-redirect (no TLS on ingresses yet), plus the endpoint-name annotation
// used to match an Ingress back to its endpoint.
var nginxIngressAnnotations = func(endpointName string) map[string]string {
	return map[string]string{
		"kubernetes.io/ingress.class":                "nginx",
		"nginx.ingress.kubernetes.io/rewrite-target": "/",
		"nginx.ingress.kubernetes.io/ssl-redirect":   "false",
		constants.DevWorkspaceEndpointNameAnnotation: endpointName,
	}
}
// BasicSolver exposes endpoints without any authentication.
// Behavior depends on the cluster type:
//   Kubernetes: Ingresses without TLS
//   OpenShift: Routes with TLS enabled
type BasicSolver struct{}

// Compile-time check that BasicSolver implements RoutingSolver.
var _ RoutingSolver = (*BasicSolver)(nil)
// FinalizerRequired always returns false: the basic solver performs no custom
// cleanup on routing deletion.
func (s *BasicSolver) FinalizerRequired(*controllerv1alpha1.DevWorkspaceRouting) bool {
	return false
}
// Finalize is a no-op since the basic solver never requires a finalizer.
func (s *BasicSolver) Finalize(*controllerv1alpha1.DevWorkspaceRouting) error {
	return nil
}
// GetSpecObjects builds the cluster objects for the routing: the common and
// discoverable services plus, depending on the platform, Routes (OpenShift)
// or Ingresses (Kubernetes). The routing suffix controller property must be
// configured; otherwise an error is returned.
func (s *BasicSolver) GetSpecObjects(routing *controllerv1alpha1.DevWorkspaceRouting, workspaceMeta DevWorkspaceMetadata) (RoutingObjects, error) {
	objects := RoutingObjects{}

	suffix := config.ControllerCfg.GetProperty(config.RoutingSuffix)
	if suffix == nil {
		return objects, errors.New(config.RoutingSuffix + " must be set for basic routing")
	}

	endpoints := routing.Spec.Endpoints
	objects.Services = append(
		getServicesForEndpoints(endpoints, workspaceMeta),
		GetDiscoverableServicesForEndpoints(endpoints, workspaceMeta)...)

	if infrastructure.IsOpenShift() {
		objects.Routes = getRoutesForSpec(*suffix, endpoints, workspaceMeta)
	} else {
		objects.Ingresses = getIngressesForSpec(*suffix, endpoints, workspaceMeta)
	}
	return objects, nil
}
// GetExposedEndpoints delegates to the shared route/ingress-based endpoint
// resolution.
func (s *BasicSolver) GetExposedEndpoints(
	endpoints map[string]controllerv1alpha1.EndpointList,
	routingObj RoutingObjects) (exposedEndpoints map[string]controllerv1alpha1.ExposedEndpointList, ready bool, err error) {
	return getExposedEndpoints(endpoints, routingObj)
}

View File

@ -0,0 +1,126 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package solvers
import (
"fmt"
"github.com/devfile/devworkspace-operator/pkg/common"
"github.com/devfile/devworkspace-operator/pkg/constants"
dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
corev1 "k8s.io/api/core/v1"
controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
)
const (
	// serviceServingCertAnnot asks the OpenShift service CA operator to generate
	// a serving-certificate secret (named by the annotation value) for the service.
	serviceServingCertAnnot = "service.beta.openshift.io/serving-cert-secret-name"
)
// ClusterSolver exposes endpoints via in-cluster Services only. With TLS
// enabled, services are annotated for OpenShift serving-cert generation and
// the resulting secret is mounted into the workspace pod.
type ClusterSolver struct {
	// TLS enables serving-certificate provisioning for the created services.
	TLS bool
}

// Compile-time check that ClusterSolver implements RoutingSolver.
var _ RoutingSolver = (*ClusterSolver)(nil)
// FinalizerRequired always returns false: the cluster solver performs no
// custom cleanup on routing deletion.
func (s *ClusterSolver) FinalizerRequired(*controllerv1alpha1.DevWorkspaceRouting) bool {
	return false
}
// Finalize is a no-op since the cluster solver never requires a finalizer.
func (s *ClusterSolver) Finalize(*controllerv1alpha1.DevWorkspaceRouting) error {
	return nil
}
// GetSpecObjects builds the in-cluster services for the routing. When TLS is
// enabled, each service is annotated for OpenShift serving-cert generation
// and the pod additions mount the certificate secret at /var/serving-cert/.
func (s *ClusterSolver) GetSpecObjects(routing *controllerv1alpha1.DevWorkspaceRouting, workspaceMeta DevWorkspaceMetadata) (RoutingObjects, error) {
	services := getServicesForEndpoints(routing.Spec.Endpoints, workspaceMeta)
	podAdditions := &controllerv1alpha1.PodAdditions{}
	if s.TLS {
		// 0644 == decimal 420: world-readable secret files.
		defaultMode := int32(0644)
		for idx := range services {
			svcName := services[idx].Name
			if services[idx].Annotations == nil {
				services[idx].Annotations = map[string]string{}
			}
			// The service CA operator stores the cert in a secret named after
			// the service.
			services[idx].Annotations[serviceServingCertAnnot] = svcName
			podAdditions.Volumes = append(podAdditions.Volumes, corev1.Volume{
				Name: common.ServingCertVolumeName(svcName),
				VolumeSource: corev1.VolumeSource{
					Secret: &corev1.SecretVolumeSource{
						SecretName:  svcName,
						DefaultMode: &defaultMode,
					},
				},
			})
			podAdditions.VolumeMounts = append(podAdditions.VolumeMounts, corev1.VolumeMount{
				Name:      common.ServingCertVolumeName(svcName),
				ReadOnly:  true,
				MountPath: "/var/serving-cert/",
			})
		}
	}

	return RoutingObjects{
		Services:     services,
		PodAdditions: podAdditions,
	}, nil
}
// GetExposedEndpoints resolves every endpoint (except "none" exposure) to its
// in-cluster service hostname. ready is always true on success since the
// service URLs are immediately known.
func (s *ClusterSolver) GetExposedEndpoints(
	endpoints map[string]controllerv1alpha1.EndpointList,
	routingObj RoutingObjects) (exposedEndpoints map[string]controllerv1alpha1.ExposedEndpointList, ready bool, err error) {
	exposedEndpoints = map[string]controllerv1alpha1.ExposedEndpointList{}

	for machineName, machineEndpoints := range endpoints {
		for _, endpoint := range machineEndpoints {
			if endpoint.Exposure == dw.NoneEndpointExposure {
				continue
			}
			endpointURL, resolveErr := resolveServiceHostnameForEndpoint(endpoint, routingObj.Services)
			if resolveErr != nil {
				return nil, false, resolveErr
			}
			exposed := controllerv1alpha1.ExposedEndpoint{
				Name:       endpoint.Name,
				Url:        endpointURL,
				Attributes: endpoint.Attributes,
			}
			exposedEndpoints[machineName] = append(exposedEndpoints[machineName], exposed)
		}
	}

	return exposedEndpoints, true, nil
}
// resolveServiceHostnameForEndpoint finds the common (non-discoverable)
// service that exposes the endpoint's target port and returns its in-cluster
// URL. An error is returned when no such service exists.
func resolveServiceHostnameForEndpoint(endpoint dw.Endpoint, services []corev1.Service) (string, error) {
	for i := range services {
		svc := &services[i]
		// Discoverable services are per-endpoint extras; skip them in favor of
		// the shared workspace service.
		if svc.Annotations[constants.DevWorkspaceDiscoverableServiceAnnotation] == "true" {
			continue
		}
		for _, port := range svc.Spec.Ports {
			if port.Port == int32(endpoint.TargetPort) {
				return getHostnameFromService(*svc, port.Port), nil
			}
		}
	}
	return "", fmt.Errorf("could not find service for endpoint %s", endpoint.Name)
}
// getHostnameFromService builds the in-cluster URL for a service port. https
// is used when the service carries the serving-cert annotation, http otherwise.
func getHostnameFromService(service corev1.Service, port int32) string {
	_, tls := service.Annotations[serviceServingCertAnnot]
	scheme := "http"
	if tls {
		scheme = "https"
	}
	return fmt.Sprintf("%s://%s.%s.svc:%d", scheme, service.Name, service.Namespace, port)
}

View File

@ -0,0 +1,247 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package solvers
import (
dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
"github.com/devfile/devworkspace-operator/pkg/common"
"github.com/devfile/devworkspace-operator/pkg/constants"
routeV1 "github.com/openshift/api/route/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
// DevWorkspaceMetadata is the minimal information about a DevWorkspace that
// solvers need to name and target the routing objects they create.
type DevWorkspaceMetadata struct {
	// DevWorkspaceId is the workspace's unique ID, used in object names and labels.
	DevWorkspaceId string
	// Namespace is where routing objects are created.
	Namespace string
	// PodSelector selects the workspace pod that services route traffic to.
	PodSelector map[string]string
}
// GetDiscoverableServicesForEndpoints converts the endpoint list into a set
// of services, one per discoverable endpoint, each named after the endpoint
// itself so it can be found by name inside the namespace. Endpoints with the
// NoneEndpointExposure are ignored.
func GetDiscoverableServicesForEndpoints(endpoints map[string]controllerv1alpha1.EndpointList, meta DevWorkspaceMetadata) []corev1.Service {
	var services []corev1.Service
	for _, machineEndpoints := range endpoints {
		for _, endpoint := range machineEndpoints {
			if endpoint.Exposure == dw.NoneEndpointExposure {
				continue
			}
			if !endpoint.Attributes.GetBoolean(string(controllerv1alpha1.DiscoverableAttribute), nil) {
				continue
			}
			// TODO: This could cause a reconcile conflict if multiple
			// workspaces define the same discoverable endpoint; also, endpoint
			// names may not be valid as service names.
			name := common.EndpointName(endpoint.Name)
			services = append(services, corev1.Service{
				ObjectMeta: metav1.ObjectMeta{
					Name:      name,
					Namespace: meta.Namespace,
					Labels: map[string]string{
						constants.DevWorkspaceIDLabel: meta.DevWorkspaceId,
					},
					Annotations: map[string]string{
						constants.DevWorkspaceDiscoverableServiceAnnotation: "true",
					},
				},
				Spec: corev1.ServiceSpec{
					Ports: []corev1.ServicePort{
						{
							Name:       name,
							Protocol:   corev1.ProtocolTCP,
							Port:       int32(endpoint.TargetPort),
							TargetPort: intstr.FromInt(endpoint.TargetPort),
						},
					},
					Selector: meta.PodSelector,
					Type:     corev1.ServiceTypeClusterIP,
				},
			})
		}
	}
	return services
}
// GetServiceForEndpoints returns a single ClusterIP service exposing every
// endpoint whose exposure is listed in exposureType; discoverable endpoints
// are included only when includeDiscoverable is true. Each target port is
// exposed at most once even if several endpoints share it. nil is returned
// when no port satisfies the criteria.
func GetServiceForEndpoints(endpoints map[string]controllerv1alpha1.EndpointList, meta DevWorkspaceMetadata, includeDiscoverable bool, exposureType ...dw.EndpointExposure) *corev1.Service {
	// "Set" of target ports not yet claimed by an exposed endpoint.
	unclaimedPorts := map[int]bool{}
	for _, machineEndpoints := range endpoints {
		for _, endpoint := range machineEndpoints {
			unclaimedPorts[endpoint.TargetPort] = true
		}
	}

	// "Set" of allowed exposure types.
	allowedExposure := map[dw.EndpointExposure]bool{}
	for _, exposure := range exposureType {
		allowedExposure[exposure] = true
	}

	var servicePorts []corev1.ServicePort
	for _, machineEndpoints := range endpoints {
		for _, endpoint := range machineEndpoints {
			if !allowedExposure[endpoint.Exposure] {
				continue
			}
			if !includeDiscoverable && endpoint.Attributes.GetBoolean(string(controllerv1alpha1.DiscoverableAttribute), nil) {
				continue
			}
			if !unclaimedPorts[endpoint.TargetPort] {
				// Port already exposed by a previous endpoint.
				continue
			}
			unclaimedPorts[endpoint.TargetPort] = false
			servicePorts = append(servicePorts, corev1.ServicePort{
				Name:       common.EndpointName(endpoint.Name),
				Protocol:   corev1.ProtocolTCP,
				Port:       int32(endpoint.TargetPort),
				TargetPort: intstr.FromInt(endpoint.TargetPort),
			})
		}
	}
	if len(servicePorts) == 0 {
		return nil
	}

	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ServiceName(meta.DevWorkspaceId),
			Namespace: meta.Namespace,
			Labels: map[string]string{
				constants.DevWorkspaceIDLabel: meta.DevWorkspaceId,
			},
		},
		Spec: corev1.ServiceSpec{
			Selector: meta.PodSelector,
			Type:     corev1.ServiceTypeClusterIP,
			Ports:    servicePorts,
		},
	}
}
// getServicesForEndpoints wraps GetServiceForEndpoints for the common case:
// one service exposing all public and internal endpoints, discoverable ones
// included. Returns nil when there is nothing to expose.
func getServicesForEndpoints(endpoints map[string]controllerv1alpha1.EndpointList, meta DevWorkspaceMetadata) []corev1.Service {
	if len(endpoints) == 0 {
		return nil
	}
	svc := GetServiceForEndpoints(endpoints, meta, true, dw.PublicEndpointExposure, dw.InternalEndpointExposure)
	if svc == nil {
		return nil
	}
	return []corev1.Service{*svc}
}
// getRoutesForSpec creates one OpenShift Route per public endpoint.
func getRoutesForSpec(routingSuffix string, endpoints map[string]controllerv1alpha1.EndpointList, meta DevWorkspaceMetadata) []routeV1.Route {
	var routes []routeV1.Route
	for _, machineEndpoints := range endpoints {
		for _, endpoint := range machineEndpoints {
			if endpoint.Exposure == dw.PublicEndpointExposure {
				routes = append(routes, getRouteForEndpoint(routingSuffix, endpoint, meta))
			}
		}
	}
	return routes
}
// getIngressesForSpec creates one Kubernetes Ingress per public endpoint.
func getIngressesForSpec(routingSuffix string, endpoints map[string]controllerv1alpha1.EndpointList, meta DevWorkspaceMetadata) []v1beta1.Ingress {
	var ingresses []v1beta1.Ingress
	for _, machineEndpoints := range endpoints {
		for _, endpoint := range machineEndpoints {
			if endpoint.Exposure == dw.PublicEndpointExposure {
				ingresses = append(ingresses, getIngressForEndpoint(routingSuffix, endpoint, meta))
			}
		}
	}
	return ingresses
}
// getRouteForEndpoint builds the OpenShift Route exposing one public
// endpoint. All endpoints of a workspace share the same host (derived from
// the workspace ID and routing suffix) and are distinguished by path. TLS is
// edge-terminated with insecure traffic redirected.
func getRouteForEndpoint(routingSuffix string, endpoint dw.Endpoint, meta DevWorkspaceMetadata) routeV1.Route {
	targetEndpoint := intstr.FromInt(endpoint.TargetPort)
	endpointName := common.EndpointName(endpoint.Name)
	return routeV1.Route{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.RouteName(meta.DevWorkspaceId, endpointName),
			Namespace: meta.Namespace,
			Labels: map[string]string{
				constants.DevWorkspaceIDLabel: meta.DevWorkspaceId,
			},
			// NOTE(review): this uses the sanitized endpointName, while the
			// ingress counterpart annotates with the raw endpoint.Name and the
			// URL resolver matches the annotation against endpoint.Name —
			// verify these agree whenever common.EndpointName rewrites the name.
			Annotations: routeAnnotations(endpointName),
		},
		Spec: routeV1.RouteSpec{
			Host: common.WorkspaceHostname(routingSuffix, meta.DevWorkspaceId),
			Path: common.EndpointPath(endpointName),
			TLS: &routeV1.TLSConfig{
				InsecureEdgeTerminationPolicy: routeV1.InsecureEdgeTerminationPolicyRedirect,
				Termination:                   routeV1.TLSTerminationEdge,
			},
			To: routeV1.RouteTargetReference{
				Kind: "Service",
				Name: common.ServiceName(meta.DevWorkspaceId),
			},
			Port: &routeV1.RoutePort{
				TargetPort: targetEndpoint,
			},
		},
	}
}
// getIngressForEndpoint builds the Kubernetes Ingress exposing one public
// endpoint. Unlike routes, each endpoint gets its own hostname (derived from
// the workspace ID, endpoint name, and target port); no TLS is configured.
func getIngressForEndpoint(routingSuffix string, endpoint dw.Endpoint, meta DevWorkspaceMetadata) v1beta1.Ingress {
	targetEndpoint := intstr.FromInt(endpoint.TargetPort)
	endpointName := common.EndpointName(endpoint.Name)
	hostname := common.EndpointHostname(routingSuffix, meta.DevWorkspaceId, endpointName, endpoint.TargetPort)
	ingressPathType := v1beta1.PathTypeImplementationSpecific
	return v1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.RouteName(meta.DevWorkspaceId, endpointName),
			Namespace: meta.Namespace,
			Labels: map[string]string{
				constants.DevWorkspaceIDLabel: meta.DevWorkspaceId,
			},
			// NOTE(review): annotates with the raw endpoint.Name while the
			// Route counterpart uses the sanitized endpointName; the URL
			// resolver matches annotations against endpoint.Name — confirm the
			// two paths stay consistent when common.EndpointName rewrites the name.
			Annotations: nginxIngressAnnotations(endpoint.Name),
		},
		Spec: v1beta1.IngressSpec{
			Rules: []v1beta1.IngressRule{
				{
					Host: hostname,
					IngressRuleValue: v1beta1.IngressRuleValue{
						HTTP: &v1beta1.HTTPIngressRuleValue{
							Paths: []v1beta1.HTTPIngressPath{
								{
									Backend: v1beta1.IngressBackend{
										ServiceName: common.ServiceName(meta.DevWorkspaceId),
										ServicePort: targetEndpoint,
									},
									PathType: &ingressPathType,
									Path:     "/",
								},
							},
						},
					},
				},
			},
		},
	}
}

View File

@ -0,0 +1,48 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package solvers
import (
"errors"
"time"
)
// Compile-time checks that the routing error types implement error.
var _ error = (*RoutingNotReady)(nil)
var _ error = (*RoutingInvalid)(nil)

// RoutingNotSupported is returned by solvers that do not support the
// routingclass of the workspace they have been asked to route.
var RoutingNotSupported = errors.New("routingclass not supported by this controller")
// RoutingNotReady is used by the solvers when they are not ready to route an
// otherwise OK workspace.
type RoutingNotReady struct {
	// Retry suggests how long to wait before retrying the workspace routing.
	// A zero value means the caller's default delay (1 second) is used.
	Retry time.Duration
}

// Error implements the error interface.
func (*RoutingNotReady) Error() string {
	return "controller not ready to resolve the workspace routing"
}
// RoutingInvalid is reported by a solver when a workspace has the correct
// routingclass but is invalid in some other sense — missing configuration, etc.
type RoutingInvalid struct {
	// Reason is the human-readable explanation; may be empty.
	Reason string
}

// Error implements the error interface, substituting a placeholder when no
// reason was supplied.
func (e *RoutingInvalid) Error() string {
	if e.Reason == "" {
		return "workspace routing is invalid: " + "<no reason given>"
	}
	return "workspace routing is invalid: " + e.Reason
}

View File

@ -0,0 +1,107 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package solvers
import (
"fmt"
"net/url"
"strings"
dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
"github.com/devfile/devworkspace-operator/pkg/constants"
)
// getExposedEndpoints resolves each public endpoint to a URL using the routes
// and ingresses in routingObj. ready is false when at least one endpoint has
// no URL assigned yet; such endpoints are still listed (with an empty Url) so
// the caller can retry later.
func getExposedEndpoints(
	endpoints map[string]controllerv1alpha1.EndpointList,
	routingObj RoutingObjects) (exposedEndpoints map[string]controllerv1alpha1.ExposedEndpointList, ready bool, err error) {
	exposedEndpoints = map[string]controllerv1alpha1.ExposedEndpointList{}
	ready = true

	for machineName, machineEndpoints := range endpoints {
		for _, endpoint := range machineEndpoints {
			if endpoint.Exposure != dw.PublicEndpointExposure {
				continue
			}
			endpointURL, resolveErr := resolveURLForEndpoint(endpoint, routingObj)
			if resolveErr != nil {
				return nil, false, resolveErr
			}
			if endpointURL == "" {
				ready = false
			}
			exposedEndpoints[machineName] = append(exposedEndpoints[machineName], controllerv1alpha1.ExposedEndpoint{
				Name:       endpoint.Name,
				Url:        endpointURL,
				Attributes: endpoint.Attributes,
			})
		}
	}

	return exposedEndpoints, ready, nil
}
// resolveURLForEndpoint locates the route or ingress annotated with the
// endpoint's name and derives the endpoint URL from it. Routes take
// precedence; an error is returned when no matching object exists or a
// matching ingress does not have exactly one rule.
func resolveURLForEndpoint(
	endpoint dw.Endpoint,
	routingObj RoutingObjects) (string, error) {
	for i := range routingObj.Routes {
		route := &routingObj.Routes[i]
		if route.Annotations[constants.DevWorkspaceEndpointNameAnnotation] != endpoint.Name {
			continue
		}
		return getURLForEndpoint(endpoint, route.Spec.Host, route.Spec.Path, route.Spec.TLS != nil), nil
	}
	for i := range routingObj.Ingresses {
		ingress := &routingObj.Ingresses[i]
		if ingress.Annotations[constants.DevWorkspaceEndpointNameAnnotation] != endpoint.Name {
			continue
		}
		if len(ingress.Spec.Rules) != 1 {
			return "", fmt.Errorf("ingress %s contains multiple rules", ingress.Name)
		}
		// No TLS supported for ingresses yet.
		return getURLForEndpoint(endpoint, ingress.Spec.Rules[0].Host, "", false), nil
	}
	return "", fmt.Errorf("could not find ingress/route for endpoint '%s'", endpoint.Name)
}
// getURLForEndpoint builds the user-facing URL for an endpoint exposed at
// host. basePath is the path prefix at which the endpoint is exposed (e.g. a
// Route's spec.Path); secure selects the TLS-secured scheme variant when the
// endpoint itself is marked secure.
func getURLForEndpoint(endpoint dw.Endpoint, host, basePath string, secure bool) string {
	protocol := endpoint.Protocol
	if secure && endpoint.Secure {
		protocol = dw.EndpointProtocol(getSecureProtocol(string(protocol)))
	}
	var p string
	if endpoint.Path != "" {
		// Join basePath and endpoint.Path with exactly one slash between them.
		// path.Join does not suit here since it eats a trailing slash, which
		// may be critical for the application.
		//
		// Bug fix: the previous code called strings.TrimLeft(p, endpoint.Path),
		// which trimmed the (empty) accumulator p using endpoint.Path as a
		// cutset and therefore dropped endpoint.Path from the URL entirely.
		p = fmt.Sprintf("%s/%s", strings.TrimRight(basePath, "/"), strings.TrimLeft(endpoint.Path, "/"))
	} else {
		p = basePath
	}
	u := url.URL{
		Scheme: string(protocol),
		Host:   host,
		Path:   p,
	}
	return u.String()
}
// getSecureProtocol maps a plaintext protocol to its TLS-secured counterpart
// (http -> https, ws -> wss). Unrecognized protocols are returned unmodified.
func getSecureProtocol(protocol string) string {
	secured := map[string]string{
		"ws":   "wss",
		"http": "https",
	}
	if s, ok := secured[protocol]; ok {
		return s
	}
	return protocol
}

View File

@ -0,0 +1,119 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package solvers
import (
"fmt"
routeV1 "github.com/openshift/api/route/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
)
// RoutingObjects is the set of cluster objects a solver produces for a
// routing, plus any additions required on the workspace pod.
type RoutingObjects struct {
	// Services to create in the workspace namespace.
	Services []v1.Service
	// Ingresses exposing endpoints on Kubernetes.
	Ingresses []v1beta1.Ingress
	// Routes exposing endpoints on OpenShift.
	Routes []routeV1.Route
	// PodAdditions are volumes/mounts to inject into the workspace pod; may be nil.
	PodAdditions *controllerv1alpha1.PodAdditions
}
// RoutingSolver translates a DevWorkspaceRouting into concrete cluster
// objects and resolves the resulting endpoint URLs.
type RoutingSolver interface {
	// FinalizerRequired tells the caller if the solver requires a finalizer on the routing object.
	FinalizerRequired(routing *controllerv1alpha1.DevWorkspaceRouting) bool
	// Finalize implements the custom finalization logic required by the solver. The solver doesn't have to
	// remove any finalizer from the finalizer list on the routing. Instead just implement the custom
	// logic required for the finalization itself. If this method doesn't return any error, the finalizer
	// is automatically removed from the routing.
	Finalize(routing *controllerv1alpha1.DevWorkspaceRouting) error
	// GetSpecObjects constructs cluster routing objects which should be applied on the cluster.
	// This method should return a RoutingNotReady error if the solver is not ready yet to process
	// the workspace routing, a RoutingInvalid error if there is a specific reason for the failure, or
	// any other error.
	// The implementors can also create any additional objects not captured by the RoutingObjects struct. If that's
	// the case they are required to set the restricted access annotation on any objects created according to the
	// restricted access specified by the routing.
	GetSpecObjects(routing *controllerv1alpha1.DevWorkspaceRouting, workspaceMeta DevWorkspaceMetadata) (RoutingObjects, error)
	// GetExposedEndpoints retrieves the URL for each endpoint in a devfile spec from a set of RoutingObjects.
	// The returned map is keyed by component id (as defined in the devfile) and maps to the list of endpoints
	// for that component. The "ready" return value specifies whether all endpoints are resolved on the cluster;
	// if false it is necessary to retry, as URLs will be undefined.
	GetExposedEndpoints(endpoints map[string]controllerv1alpha1.EndpointList, routingObj RoutingObjects) (exposedEndpoints map[string]controllerv1alpha1.ExposedEndpointList, ready bool, err error)
}
// RoutingSolverGetter resolves routing classes to RoutingSolver
// implementations and wires any solver-specific watches into the controller.
type RoutingSolverGetter interface {
	// SetupControllerManager is called during the setup of the controller and can modify the controller manager with additional
	// watches, etc., needed for the correct operation of the solver.
	SetupControllerManager(mgr *builder.Builder) error
	// HasSolver returns whether the provided routingClass is supported by this RoutingSolverGetter. Returns false if
	// calling GetSolver with routingClass will return a RoutingNotSupported error. Can be used to check if a routingClass
	// is supported without having to provide a runtime client. Note that GetSolver may still return another error, if e.g.
	// an OpenShift-only routingClass is used on a vanilla Kubernetes platform.
	HasSolver(routingClass controllerv1alpha1.DevWorkspaceRoutingClass) bool
	// GetSolver obtains a RoutingSolver for a particular DevWorkspaceRouting instance. This function should return
	// a RoutingNotSupported error if the routingClass is not recognized, and any other error if the routingClass
	// is invalid (e.g. an OpenShift-only routingClass on a vanilla Kubernetes platform). Note that an empty routingClass
	// is handled by the DevWorkspace controller itself, and should not be handled by external controllers.
	GetSolver(client client.Client, routingClass controllerv1alpha1.DevWorkspaceRoutingClass) (solver RoutingSolver, err error)
}
// SolverGetter is the built-in RoutingSolverGetter used by the DevWorkspace
// controller. It serves the basic and cluster routing classes.
type SolverGetter struct{}

// Compile-time assertion that SolverGetter implements RoutingSolverGetter.
var _ RoutingSolverGetter = (*SolverGetter)(nil)
// HasSolver reports whether routingClass can be served by this getter without
// needing a runtime client. The empty routing class is always supported, as it
// selects the default solver built into the DevWorkspace controller.
func (_ *SolverGetter) HasSolver(routingClass controllerv1alpha1.DevWorkspaceRoutingClass) bool {
	return routingClass == "" ||
		routingClass == controllerv1alpha1.DevWorkspaceRoutingBasic ||
		routingClass == controllerv1alpha1.DevWorkspaceRoutingCluster ||
		routingClass == controllerv1alpha1.DevWorkspaceRoutingClusterTLS ||
		routingClass == controllerv1alpha1.DevWorkspaceRoutingWebTerminal
}
// GetSolver returns the solver implementation for routingClass. TLS-based
// routing classes are rejected off OpenShift, and an unrecognized class yields
// the RoutingNotSupported sentinel error.
func (_ *SolverGetter) GetSolver(_ client.Client, routingClass controllerv1alpha1.DevWorkspaceRoutingClass) (RoutingSolver, error) {
	switch routingClass {
	case controllerv1alpha1.DevWorkspaceRoutingBasic:
		return &BasicSolver{}, nil
	case controllerv1alpha1.DevWorkspaceRoutingCluster:
		return &ClusterSolver{}, nil
	case controllerv1alpha1.DevWorkspaceRoutingClusterTLS, controllerv1alpha1.DevWorkspaceRoutingWebTerminal:
		// These classes require OpenShift-specific infrastructure.
		if !infrastructure.IsOpenShift() {
			return nil, fmt.Errorf("routing class %s only supported on OpenShift", routingClass)
		}
		return &ClusterSolver{TLS: true}, nil
	}
	return nil, RoutingNotSupported
}
// SetupControllerManager is a no-op for the built-in solvers: they do not
// require any additional watches on the controller manager.
func (*SolverGetter) SetupControllerManager(*builder.Builder) error {
	return nil
}

View File

@ -0,0 +1,111 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package devworkspacerouting
import (
"context"
"fmt"
"github.com/devfile/devworkspace-operator/pkg/constants"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
)
// ingressDiffOpts are the comparison options used when deciding whether a
// cluster ingress matches its spec counterpart: type/object metadata, status,
// and the PathType field are ignored so only meaningful spec differences
// trigger an update.
var ingressDiffOpts = cmp.Options{
	cmpopts.IgnoreFields(v1beta1.Ingress{}, "TypeMeta", "ObjectMeta", "Status"),
	cmpopts.IgnoreFields(v1beta1.HTTPIngressPath{}, "PathType"),
}
// syncIngresses reconciles the ingresses on the cluster with those in the
// routing spec: stale ingresses are deleted, changed ones updated, and missing
// ones created. ok is true only when no changes were needed; clusterIngresses
// reflects the cluster state as read at the start of the sync.
func (r *DevWorkspaceRoutingReconciler) syncIngresses(routing *controllerv1alpha1.DevWorkspaceRouting, specIngresses []v1beta1.Ingress) (ok bool, clusterIngresses []v1beta1.Ingress, err error) {
	clusterIngresses, err = r.getClusterIngresses(routing)
	if err != nil {
		return false, nil, err
	}
	inSync := true
	// Remove ingresses that are no longer part of the spec.
	for _, stale := range getIngressesToDelete(clusterIngresses, specIngresses) {
		stale := stale
		if err := r.Delete(context.TODO(), &stale); err != nil {
			return false, nil, err
		}
		inSync = false
	}
	for _, desired := range specIngresses {
		found, idx := listContainsIngressByName(desired, clusterIngresses)
		if !found {
			desired := desired
			if err := r.Create(context.TODO(), &desired); err != nil {
				return false, nil, err
			}
			inSync = false
			continue
		}
		existing := clusterIngresses[idx]
		if cmp.Equal(desired, existing, ingressDiffOpts) {
			continue
		}
		// Update the ingress's spec; update conflicts are tolerated since the
		// sync is retried.
		existing.Spec = desired.Spec
		if err := r.Update(context.TODO(), &existing); err != nil && !errors.IsConflict(err) {
			return false, nil, err
		}
		inSync = false
	}
	return inSync, clusterIngresses, nil
}
// getClusterIngresses lists the ingresses in the routing's namespace that carry
// the routing's workspace-ID label.
func (r *DevWorkspaceRoutingReconciler) getClusterIngresses(routing *controllerv1alpha1.DevWorkspaceRouting) ([]v1beta1.Ingress, error) {
	selector, err := labels.Parse(fmt.Sprintf("%s=%s", constants.DevWorkspaceIDLabel, routing.Spec.DevWorkspaceId))
	if err != nil {
		return nil, err
	}
	ingressList := &v1beta1.IngressList{}
	opts := &client.ListOptions{
		Namespace:     routing.Namespace,
		LabelSelector: selector,
	}
	if err := r.List(context.TODO(), ingressList, opts); err != nil {
		return nil, err
	}
	return ingressList.Items, nil
}
// getIngressesToDelete returns the cluster ingresses that have no counterpart
// (by name) in the spec and therefore should be removed.
func getIngressesToDelete(clusterIngresses, specIngresses []v1beta1.Ingress) []v1beta1.Ingress {
	var obsolete []v1beta1.Ingress
	for _, candidate := range clusterIngresses {
		if exists, _ := listContainsIngressByName(candidate, specIngresses); !exists {
			obsolete = append(obsolete, candidate)
		}
	}
	return obsolete
}
// listContainsIngressByName reports whether list holds an ingress with the same
// name as query, returning its index when found (and -1 otherwise).
func listContainsIngressByName(query v1beta1.Ingress, list []v1beta1.Ingress) (exists bool, idx int) {
	for i := range list {
		if list[i].Name == query.Name {
			return true, i
		}
	}
	return false, -1
}

View File

@ -0,0 +1,124 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package devworkspacerouting
import (
"context"
"fmt"
"github.com/devfile/devworkspace-operator/pkg/constants"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
routeV1 "github.com/openshift/api/route/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
)
// routeDiffOpts are the comparison options used when deciding whether a
// cluster route matches its spec counterpart: type/object metadata and status
// are ignored, as are the WildcardPolicy, Host, and target Weight fields, so
// only meaningful spec differences trigger an update.
var routeDiffOpts = cmp.Options{
	cmpopts.IgnoreFields(routeV1.Route{}, "TypeMeta", "ObjectMeta", "Status"),
	cmpopts.IgnoreFields(routeV1.RouteSpec{}, "WildcardPolicy", "Host"),
	cmpopts.IgnoreFields(routeV1.RouteTargetReference{}, "Weight"),
}
// syncRoutes reconciles the routes on the cluster with those in the routing
// spec: stale routes are deleted, changed ones updated, and missing ones
// created. ok is true only when no changes were needed; clusterRoutes reflects
// the cluster state as read at the start of the sync.
func (r *DevWorkspaceRoutingReconciler) syncRoutes(routing *controllerv1alpha1.DevWorkspaceRouting, specRoutes []routeV1.Route) (ok bool, clusterRoutes []routeV1.Route, err error) {
	clusterRoutes, err = r.getClusterRoutes(routing)
	if err != nil {
		return false, nil, err
	}
	inSync := true
	// Remove routes that are no longer part of the spec.
	for _, stale := range getRoutesToDelete(clusterRoutes, specRoutes) {
		stale := stale
		if err := r.Delete(context.TODO(), &stale); err != nil {
			return false, nil, err
		}
		inSync = false
	}
	for _, desired := range specRoutes {
		found, idx := listContainsRouteByName(desired, clusterRoutes)
		if !found {
			desired := desired
			if err := r.Create(context.TODO(), &desired); err != nil {
				return false, nil, err
			}
			inSync = false
			continue
		}
		existing := clusterRoutes[idx]
		if cmp.Equal(desired, existing, routeDiffOpts) {
			continue
		}
		// Update the route's spec; update conflicts are tolerated since the
		// sync is retried.
		existing.Spec = desired.Spec
		if err := r.Update(context.TODO(), &existing); err != nil && !errors.IsConflict(err) {
			return false, nil, err
		}
		inSync = false
	}
	return inSync, clusterRoutes, nil
}
// getClusterRoutes lists the routes in the routing's namespace that carry the
// routing's workspace-ID label, filtering out routes that OpenShift creates
// automatically for ingresses.
func (r *DevWorkspaceRoutingReconciler) getClusterRoutes(routing *controllerv1alpha1.DevWorkspaceRouting) ([]routeV1.Route, error) {
	found := &routeV1.RouteList{}
	labelSelector, err := labels.Parse(fmt.Sprintf("%s=%s", constants.DevWorkspaceIDLabel, routing.Spec.DevWorkspaceId))
	if err != nil {
		return nil, err
	}
	listOptions := &client.ListOptions{
		Namespace:     routing.Namespace,
		LabelSelector: labelSelector,
	}
	err = r.List(context.TODO(), found, listOptions)
	if err != nil {
		return nil, err
	}
	var routes []routeV1.Route
	for _, route := range found.Items {
		// We need to filter routes that are created automatically for ingresses on OpenShift.
		// Bug fix: the previous loop appended the route once per non-Ingress owner
		// reference (duplicating routes with several owners) and silently dropped
		// routes with no owner references at all. Now a route is included exactly
		// once unless some owner reference is an Ingress.
		ingressOwned := false
		for _, ownerref := range route.OwnerReferences {
			if ownerref.Kind == "Ingress" {
				ingressOwned = true
				break
			}
		}
		if !ingressOwned {
			routes = append(routes, route)
		}
	}
	return routes, nil
}
// getRoutesToDelete returns the cluster routes that have no counterpart (by
// name) in the spec and therefore should be removed.
func getRoutesToDelete(clusterRoutes, specRoutes []routeV1.Route) []routeV1.Route {
	var obsolete []routeV1.Route
	for _, candidate := range clusterRoutes {
		if exists, _ := listContainsRouteByName(candidate, specRoutes); !exists {
			obsolete = append(obsolete, candidate)
		}
	}
	return obsolete
}
// listContainsRouteByName reports whether list holds a route with the same name
// as query, returning its index when found (and -1 otherwise).
func listContainsRouteByName(query routeV1.Route, list []routeV1.Route) (exists bool, idx int) {
	for i := range list {
		if list[i].Name == query.Name {
			return true, i
		}
	}
	return false, -1
}

View File

@ -0,0 +1,118 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package devworkspacerouting
import (
"context"
"fmt"
"strings"
"github.com/devfile/devworkspace-operator/pkg/constants"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
)
// serviceDiffOpts are the comparison options used when deciding whether a
// cluster service matches its spec counterpart: type/object metadata, status,
// and cluster-assigned ServiceSpec fields are ignored, and ports are compared
// order-insensitively (SortSlices) so a reordering alone does not trigger an
// update.
var serviceDiffOpts = cmp.Options{
	cmpopts.IgnoreFields(corev1.Service{}, "TypeMeta", "ObjectMeta", "Status"),
	cmpopts.IgnoreFields(corev1.ServiceSpec{}, "ClusterIP", "ClusterIPs", "IPFamilies", "IPFamilyPolicy", "SessionAffinity"),
	cmpopts.IgnoreFields(corev1.ServicePort{}, "TargetPort"),
	cmpopts.SortSlices(func(a, b corev1.ServicePort) bool {
		return strings.Compare(a.Name, b.Name) > 0
	}),
}
// syncServices reconciles the services on the cluster with those in the
// routing spec: stale services are deleted, changed ones updated, and missing
// ones created. ok is true only when no changes were needed; clusterServices
// reflects the cluster state as read at the start of the sync.
func (r *DevWorkspaceRoutingReconciler) syncServices(routing *controllerv1alpha1.DevWorkspaceRouting, specServices []corev1.Service) (ok bool, clusterServices []corev1.Service, err error) {
	clusterServices, err = r.getClusterServices(routing)
	if err != nil {
		return false, nil, err
	}
	inSync := true
	// Remove services that are no longer part of the spec.
	for _, stale := range getServicesToDelete(clusterServices, specServices) {
		stale := stale
		if err := r.Delete(context.TODO(), &stale); err != nil {
			return false, nil, err
		}
		inSync = false
	}
	for _, desired := range specServices {
		found, idx := listContainsByName(desired, clusterServices)
		if !found {
			desired := desired
			if err := r.Create(context.TODO(), &desired); err != nil {
				return false, nil, err
			}
			inSync = false
			continue
		}
		existing := clusterServices[idx]
		if cmp.Equal(desired, existing, serviceDiffOpts) {
			continue
		}
		// Cannot naively copy spec, as clusterIP is unmodifiable: carry the
		// existing ClusterIP into the updated spec. Update conflicts are
		// tolerated since the sync is retried.
		preservedIP := existing.Spec.ClusterIP
		existing.Spec = desired.Spec
		existing.Spec.ClusterIP = preservedIP
		if err := r.Update(context.TODO(), &existing); err != nil && !errors.IsConflict(err) {
			return false, nil, err
		}
		inSync = false
	}
	return inSync, clusterServices, nil
}
// getClusterServices lists the services in the routing's namespace that carry
// the routing's workspace-ID label.
func (r *DevWorkspaceRoutingReconciler) getClusterServices(routing *controllerv1alpha1.DevWorkspaceRouting) ([]corev1.Service, error) {
	selector, err := labels.Parse(fmt.Sprintf("%s=%s", constants.DevWorkspaceIDLabel, routing.Spec.DevWorkspaceId))
	if err != nil {
		return nil, err
	}
	serviceList := &corev1.ServiceList{}
	opts := &client.ListOptions{
		Namespace:     routing.Namespace,
		LabelSelector: selector,
	}
	if err := r.List(context.TODO(), serviceList, opts); err != nil {
		return nil, err
	}
	return serviceList.Items, nil
}
// getServicesToDelete returns the cluster services that have no counterpart
// (by name) in the spec and therefore should be removed.
func getServicesToDelete(clusterServices, specServices []corev1.Service) []corev1.Service {
	var obsolete []corev1.Service
	for _, candidate := range clusterServices {
		if exists, _ := listContainsByName(candidate, specServices); !exists {
			obsolete = append(obsolete, candidate)
		}
	}
	return obsolete
}
// listContainsByName reports whether list holds a service with the same name as
// query, returning its index when found (and -1 otherwise).
func listContainsByName(query corev1.Service, list []corev1.Service) (exists bool, idx int) {
	for i := range list {
		if list[i].Name == query.Name {
			return true, i
		}
	}
	return false, -1
}

View File

@ -0,0 +1,152 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
// Package images is intended to support deploying the operator on restricted networks. It contains
// utilities for translating images referenced by environment variables to regular image references,
// allowing images that are defined by a tag to be replaced by digests automatically. This allows all
// images used by the controller to be defined as environment variables on the controller deployment.
//
// All images defined must be referenced by an environment variable of the form RELATED_IMAGE_<name>.
// Functions in this package can be called to replace references to ${RELATED_IMAGE_<name>} with the
// corresponding environment variable.
package images
import (
"fmt"
"os"
"regexp"
dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
logf "sigs.k8s.io/controller-runtime/pkg/log"
)
// log is the package-level logger for image-reference resolution.
var log = logf.Log.WithName("container-images")
// envRegexp matches placeholders of the form ${RELATED_IMAGE_<name>} and
// captures the environment variable name. The character class [^}]* (rather
// than a greedy .*) stops the capture at the placeholder's closing brace, so a
// later '}' in the same string cannot be swallowed into the variable name.
var envRegexp = regexp.MustCompile(`\${(RELATED_IMAGE_[^}]*)}`)
// Names of the RELATED_IMAGE_* environment variables that hold the image
// references used by the operator (see the package documentation).
const (
	webTerminalToolingImageEnvVar = "RELATED_IMAGE_web_terminal_tooling"
	webhookServerImageEnvVar      = "RELATED_IMAGE_devworkspace_webhook_server"
	kubeRBACProxyImageEnvVar      = "RELATED_IMAGE_kube_rbac_proxy"
	pvcCleanupJobImageEnvVar      = "RELATED_IMAGE_pvc_cleanup_job"

	asyncStorageServerImageEnvVar  = "RELATED_IMAGE_async_storage_server"
	asyncStorageSidecarImageEnvVar = "RELATED_IMAGE_async_storage_sidecar"

	projectCloneImageEnvVar = "RELATED_IMAGE_project_clone"
)
// GetWebhookServerImage returns the image reference for the webhook server
// image. Returns the empty string (and logs an error) if environment variable
// RELATED_IMAGE_devworkspace_webhook_server is not defined.
func GetWebhookServerImage() string {
	image, defined := os.LookupEnv(webhookServerImageEnvVar)
	if defined {
		return image
	}
	log.Error(fmt.Errorf("environment variable %s is not set", webhookServerImageEnvVar), "Could not get webhook server image")
	return ""
}
// GetKubeRBACProxyImage returns the image reference for the kube RBAC proxy. Returns
// the empty string if environment variable RELATED_IMAGE_kube_rbac_proxy is not defined.
func GetKubeRBACProxyImage() string {
	val, ok := os.LookupEnv(kubeRBACProxyImageEnvVar)
	if !ok {
		// Bug fix: this previously logged "Could not get webhook server image"
		// (copy-pasted from GetWebhookServerImage).
		log.Error(fmt.Errorf("environment variable %s is not set", kubeRBACProxyImageEnvVar), "Could not get kube RBAC proxy image")
		return ""
	}
	return val
}
// GetWebTerminalToolingImage returns the image reference for the default web
// tooling image. Returns the empty string (and logs an error) if environment
// variable RELATED_IMAGE_web_terminal_tooling is not defined.
func GetWebTerminalToolingImage() string {
	image, defined := os.LookupEnv(webTerminalToolingImageEnvVar)
	if defined {
		return image
	}
	log.Error(fmt.Errorf("environment variable %s is not set", webTerminalToolingImageEnvVar), "Could not get web terminal tooling image")
	return ""
}
// GetPVCCleanupJobImage returns the image reference for the PVC cleanup job
// used to clean workspace files from the common PVC in a namespace. Returns
// the empty string (and logs an error) if environment variable
// RELATED_IMAGE_pvc_cleanup_job is not defined.
func GetPVCCleanupJobImage() string {
	image, defined := os.LookupEnv(pvcCleanupJobImageEnvVar)
	if defined {
		return image
	}
	log.Error(fmt.Errorf("environment variable %s is not set", pvcCleanupJobImageEnvVar), "Could not get PVC cleanup job image")
	return ""
}
// GetAsyncStorageServerImage returns the image reference for the async storage
// server. Returns the empty string (and logs an error) if environment variable
// RELATED_IMAGE_async_storage_server is not defined.
func GetAsyncStorageServerImage() string {
	val, ok := os.LookupEnv(asyncStorageServerImageEnvVar)
	if !ok {
		log.Error(fmt.Errorf("environment variable %s is not set", asyncStorageServerImageEnvVar), "Could not get async storage server image")
		return ""
	}
	return val
}
// GetAsyncStorageSidecarImage returns the image reference for the async storage
// sidecar. Returns the empty string (and logs an error) if environment variable
// RELATED_IMAGE_async_storage_sidecar is not defined.
func GetAsyncStorageSidecarImage() string {
	val, ok := os.LookupEnv(asyncStorageSidecarImageEnvVar)
	if !ok {
		log.Error(fmt.Errorf("environment variable %s is not set", asyncStorageSidecarImageEnvVar), "Could not get async storage sidecar image")
		return ""
	}
	return val
}
// GetProjectClonerImage returns the image reference for the initial project
// clone container. Returns the empty string if environment variable
// RELATED_IMAGE_project_clone is not defined; unlike the other image getters,
// the missing variable is logged at info rather than error level.
func GetProjectClonerImage() string {
	val, ok := os.LookupEnv(projectCloneImageEnvVar)
	if !ok {
		log.Info(fmt.Sprintf("Could not get initial project clone image: environment variable %s is not set", projectCloneImageEnvVar))
		return ""
	}
	return val
}
// FillPluginEnvVars replaces plugin devworkspaceTemplate
// .spec.components[].container.image environment variables of the form
// ${RELATED_IMAGE_*} with values from environment variables with the same
// name. The template is modified in place and also returned.
//
// Returns error if any referenced environment variable is undefined.
func FillPluginEnvVars(pluginDWT *dw.DevWorkspaceTemplate) (*dw.DevWorkspaceTemplate, error) {
	components := pluginDWT.Spec.Components
	for i := range components {
		container := components[i].Container
		if container == nil {
			// Non-container components have no image to resolve.
			continue
		}
		resolved, err := getImageForEnvVar(container.Image)
		if err != nil {
			return nil, err
		}
		container.Image = resolved
	}
	return pluginDWT, nil
}
// isImageEnvVar reports whether query is a ${RELATED_IMAGE_*} placeholder.
func isImageEnvVar(query string) bool {
	return envRegexp.MatchString(query)
}
// getImageForEnvVar resolves a ${RELATED_IMAGE_*} placeholder to the value of
// the corresponding environment variable. Strings that are not placeholders
// are returned unchanged; a placeholder whose variable is unset is an error.
func getImageForEnvVar(envStr string) (string, error) {
	if !isImageEnvVar(envStr) {
		// Value passed in is not an env var reference; use it as-is.
		return envStr, nil
	}
	envName := envRegexp.FindStringSubmatch(envStr)[1]
	if val, ok := os.LookupEnv(envName); ok {
		return val, nil
	}
	log.Info(fmt.Sprintf("Environment variable '%s' is unset. Cannot determine image to use", envName))
	return "", fmt.Errorf("environment variable %s is unset", envName)
}

Some files were not shown because too many files have changed in this diff Show More