feat: Syncing of proxy settings and self-signed cert to the user's workspace namespace (#1027)

Syncing of proxy settings and self-signed cert to the user's workspace namespace.

Co-authored-by: Michal Vala <mvala@redhat.com>
Co-authored-by: Serhii Leshchenko <sleshche@redhat.com>
pull/1084/head
Lukas Krejci 2021-09-21 21:00:53 +02:00 committed by GitHub
parent 0ae0af669a
commit 5e657f876b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
29 changed files with 2925 additions and 36 deletions

33
.github/bin/common.sh vendored
View File

@ -167,24 +167,49 @@ installYq() {
# Grabs Eclipse Che logs # Grabs Eclipse Che logs
collectLogs() { collectLogs() {
mkdir -p ${ARTIFACTS_DIR} mkdir -p ${ARTIFACTS_DIR}
chectl server:logs --chenamespace=${NAMESPACE} --directory=${ARTIFACTS_DIR}
set +x set +e
chectl server:logs --chenamespace=${NAMESPACE} --directory=${ARTIFACTS_DIR}
collectDevworkspaceOperatorLogs
oc get events -n ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE} > ${ARTIFACTS_DIR}/events-${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}.txt oc get events -n ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE} > ${ARTIFACTS_DIR}/events-${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}.txt
oc get events -n ${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE} > ${ARTIFACTS_DIR}/events-${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE}.txt oc get events -n ${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE} > ${ARTIFACTS_DIR}/events-${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE}.txt
set -x set -e
}
collectDevworkspaceOperatorLogs() {
mkdir -p ${ARTIFACTS_DIR}/devworkspace-operator
oc get events -n devworkspace-controller > ${ARTIFACTS_DIR}/events-devworkspace-controller.txt
#determine the name of the devworkspace controller manager pod
local CONTROLLER_POD_NAME=$(oc get pods -n devworkspace-controller -l app.kubernetes.io/name=devworkspace-controller -o json | jq -r '.items[0].metadata.name')
local WEBHOOK_SVR_POD_NAME=$(oc get pods -n devworkspace-controller -l app.kubernetes.io/name=devworkspace-webhook-server -o json | jq -r '.items[0].metadata.name')
# save the logs of all the containers in the DWO pod
for container in $(oc get pod -n devworkspace-controller ${CONTROLLER_POD_NAME} -o json | jq -r '.spec.containers[] | .name'); do
mkdir -p ${ARTIFACTS_DIR}/devworkspace-operator/${CONTROLLER_POD_NAME}
oc logs -n devworkspace-controller deployment/devworkspace-controller-manager -c ${container} > ${ARTIFACTS_DIR}/devworkspace-operator/${CONTROLLER_POD_NAME}/${container}.log
done
for container in $(oc get pod -n devworkspace-controller ${WEBHOOK_SVR_POD_NAME} -o json | jq -r '.spec.containers[] | .name'); do
mkdir -p ${ARTIFACTS_DIR}/devworkspace-operator/${WEBHOOK_SVR_POD_NAME}
oc logs -n devworkspace-controller deployment/devworkspace-webhook-server -c ${container} > ${ARTIFACTS_DIR}/devworkspace-operator/${WEBHOOK_SVR_POD_NAME}/${container}.log
done
} }
# Build latest operator image # Build latest operator image
buildCheOperatorImage() { buildCheOperatorImage() {
#docker build -t "${OPERATOR_IMAGE}" -f Dockerfile .
docker build -t "${OPERATOR_IMAGE}" -f Dockerfile . && docker save "${OPERATOR_IMAGE}" > /tmp/operator.tar docker build -t "${OPERATOR_IMAGE}" -f Dockerfile . && docker save "${OPERATOR_IMAGE}" > /tmp/operator.tar
} }
copyCheOperatorImageToMinikube() { copyCheOperatorImageToMinikube() {
#docker save "${OPERATOR_IMAGE}" | minikube ssh --native-ssh=false -- docker load
eval $(minikube docker-env) && docker load -i /tmp/operator.tar && rm /tmp/operator.tar eval $(minikube docker-env) && docker load -i /tmp/operator.tar && rm /tmp/operator.tar
} }
copyCheOperatorImageToMinishift() { copyCheOperatorImageToMinishift() {
#docker save -o "${OPERATOR_IMAGE}" | minishift ssh "docker load"
eval $(minishift docker-env) && docker load -i /tmp/operator.tar && rm /tmp/operator.tar eval $(minishift docker-env) && docker load -i /tmp/operator.tar && rm /tmp/operator.tar
} }
@ -508,7 +533,7 @@ waitAllPodsRunning() {
fi fi
kubectl get pods -n ${namespace} kubectl get pods -n ${namespace}
sleep 5 sleep 10
n=$(( n+1 )) n=$(( n+1 ))
done done

View File

@ -25,7 +25,7 @@ type CheClusterSpec struct {
// Configuration of the workspace endpoints that are exposed on separate domains, as opposed to the subpaths // Configuration of the workspace endpoints that are exposed on separate domains, as opposed to the subpaths
// of the gateway. // of the gateway.
WorkspaceDomainEndpoints `json:"workspaceDomainEndpoints,omitempty"` WorkspaceDomainEndpoints WorkspaceDomainEndpoints `json:"workspaceDomainEndpoints,omitempty"`
// Gateway contains the configuration of the gateway used for workspace endpoint routing. // Gateway contains the configuration of the gateway used for workspace endpoint routing.
Gateway CheGatewaySpec `json:"gateway,omitempty"` Gateway CheGatewaySpec `json:"gateway,omitempty"`

View File

@ -83,7 +83,7 @@ metadata:
operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
repository: https://github.com/eclipse-che/che-operator repository: https://github.com/eclipse-che/che-operator
support: Eclipse Foundation support: Eclipse Foundation
name: eclipse-che-preview-kubernetes.v7.37.0-308.next name: eclipse-che-preview-kubernetes.v7.37.0-309.next
namespace: placeholder namespace: placeholder
spec: spec:
apiservicedefinitions: {} apiservicedefinitions: {}
@ -339,6 +339,7 @@ spec:
- list - list
- create - create
- update - update
- watch
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -870,7 +871,7 @@ spec:
- name: RELATED_IMAGE_che_tls_secrets_creation_job - name: RELATED_IMAGE_che_tls_secrets_creation_job
value: quay.io/eclipse/che-tls-secret-creator:alpine-d1ed4ad value: quay.io/eclipse/che-tls-secret-creator:alpine-d1ed4ad
- name: RELATED_IMAGE_pvc_jobs - name: RELATED_IMAGE_pvc_jobs
value: registry.access.redhat.com/ubi8-minimal:8.4-208 value: registry.access.redhat.com/ubi8-minimal:8.4-210
- name: RELATED_IMAGE_postgres - name: RELATED_IMAGE_postgres
value: quay.io/eclipse/che--centos--postgresql-96-centos7:9.6-b681d78125361519180a6ac05242c296f8906c11eab7e207b5ca9a89b6344392 value: quay.io/eclipse/che--centos--postgresql-96-centos7:9.6-b681d78125361519180a6ac05242c296f8906c11eab7e207b5ca9a89b6344392
- name: RELATED_IMAGE_keycloak - name: RELATED_IMAGE_keycloak
@ -1174,4 +1175,4 @@ spec:
maturity: stable maturity: stable
provider: provider:
name: Eclipse Foundation name: Eclipse Foundation
version: 7.37.0-308.next version: 7.37.0-309.next

View File

@ -76,7 +76,7 @@ metadata:
operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
repository: https://github.com/eclipse-che/che-operator repository: https://github.com/eclipse-che/che-operator
support: Eclipse Foundation support: Eclipse Foundation
name: eclipse-che-preview-openshift.v7.37.0-315.next name: eclipse-che-preview-openshift.v7.37.0-316.next
namespace: placeholder namespace: placeholder
spec: spec:
apiservicedefinitions: {} apiservicedefinitions: {}
@ -344,6 +344,7 @@ spec:
verbs: verbs:
- get - get
- list - list
- watch
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -353,6 +354,7 @@ spec:
- list - list
- create - create
- update - update
- watch
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -872,7 +874,7 @@ spec:
- name: RELATED_IMAGE_devfile_registry - name: RELATED_IMAGE_devfile_registry
value: quay.io/eclipse/che-devfile-registry:next value: quay.io/eclipse/che-devfile-registry:next
- name: RELATED_IMAGE_pvc_jobs - name: RELATED_IMAGE_pvc_jobs
value: registry.access.redhat.com/ubi8-minimal:8.4-208 value: registry.access.redhat.com/ubi8-minimal:8.4-210
- name: RELATED_IMAGE_postgres - name: RELATED_IMAGE_postgres
value: quay.io/eclipse/che--centos--postgresql-96-centos7:9.6-b681d78125361519180a6ac05242c296f8906c11eab7e207b5ca9a89b6344392 value: quay.io/eclipse/che--centos--postgresql-96-centos7:9.6-b681d78125361519180a6ac05242c296f8906c11eab7e207b5ca9a89b6344392
- name: RELATED_IMAGE_keycloak - name: RELATED_IMAGE_keycloak
@ -1198,4 +1200,4 @@ spec:
maturity: stable maturity: stable
provider: provider:
name: Eclipse Foundation name: Eclipse Foundation
version: 7.37.0-315.next version: 7.37.0-316.next

View File

@ -68,7 +68,7 @@ spec:
- name: RELATED_IMAGE_che_tls_secrets_creation_job - name: RELATED_IMAGE_che_tls_secrets_creation_job
value: quay.io/eclipse/che-tls-secret-creator:alpine-d1ed4ad value: quay.io/eclipse/che-tls-secret-creator:alpine-d1ed4ad
- name: RELATED_IMAGE_pvc_jobs - name: RELATED_IMAGE_pvc_jobs
value: registry.access.redhat.com/ubi8-minimal:8.4-208 value: registry.access.redhat.com/ubi8-minimal:8.4-210
- name: RELATED_IMAGE_postgres - name: RELATED_IMAGE_postgres
value: quay.io/eclipse/che--centos--postgresql-96-centos7:9.6-b681d78125361519180a6ac05242c296f8906c11eab7e207b5ca9a89b6344392 value: quay.io/eclipse/che--centos--postgresql-96-centos7:9.6-b681d78125361519180a6ac05242c296f8906c11eab7e207b5ca9a89b6344392
- name: RELATED_IMAGE_keycloak - name: RELATED_IMAGE_keycloak

View File

@ -143,6 +143,7 @@ rules:
verbs: verbs:
- get - get
- list - list
- watch
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -152,6 +153,7 @@ rules:
- list - list
- create - create
- update - update
- watch
- apiGroups: - apiGroups:
- '' - ''
resources: resources:

View File

@ -370,7 +370,7 @@ func (r *CheClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error)
} }
// Read proxy configuration // Read proxy configuration
proxy, err := r.getProxyConfiguration(deployContext) proxy, err := GetProxyConfiguration(deployContext)
if err != nil { if err != nil {
r.Log.Error(err, "Error on reading proxy configuration") r.Log.Error(err, "Error on reading proxy configuration")
return ctrl.Result{}, err return ctrl.Result{}, err

View File

@ -17,7 +17,7 @@ import (
configv1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/api/config/v1"
) )
func (r *CheClusterReconciler) getProxyConfiguration(deployContext *deploy.DeployContext) (*deploy.Proxy, error) { func GetProxyConfiguration(deployContext *deploy.DeployContext) (*deploy.Proxy, error) {
// OpenShift 4.x // OpenShift 4.x
if util.IsOpenShift4 { if util.IsOpenShift4 {
clusterProxy := &configv1.Proxy{} clusterProxy := &configv1.Proxy{}

View File

@ -318,7 +318,6 @@ func TestReadProxyConfiguration(t *testing.T) {
scheme.AddKnownTypes(configv1.SchemeGroupVersion, &configv1.Proxy{}) scheme.AddKnownTypes(configv1.SchemeGroupVersion, &configv1.Proxy{})
cli := fake.NewFakeClientWithScheme(scheme, testCase.initObjects...) cli := fake.NewFakeClientWithScheme(scheme, testCase.initObjects...)
nonCachedClient := fake.NewFakeClientWithScheme(scheme, testCase.initObjects...)
clientSet := fakeclientset.NewSimpleClientset() clientSet := fakeclientset.NewSimpleClientset()
fakeDiscovery, _ := clientSet.Discovery().(*fakeDiscovery.FakeDiscovery) fakeDiscovery, _ := clientSet.Discovery().(*fakeDiscovery.FakeDiscovery)
fakeDiscovery.Fake.Resources = []*metav1.APIResourceList{} fakeDiscovery.Fake.Resources = []*metav1.APIResourceList{}
@ -326,13 +325,6 @@ func TestReadProxyConfiguration(t *testing.T) {
os.Setenv("OPENSHIFT_VERSION", testCase.openShiftVersion) os.Setenv("OPENSHIFT_VERSION", testCase.openShiftVersion)
util.IsOpenShift, util.IsOpenShift4, _ = util.DetectOpenShift() util.IsOpenShift, util.IsOpenShift4, _ = util.DetectOpenShift()
r := &CheClusterReconciler{
client: cli,
nonCachedClient: nonCachedClient,
discoveryClient: fakeDiscovery,
Scheme: scheme,
}
deployContext := &deploy.DeployContext{ deployContext := &deploy.DeployContext{
CheCluster: testCase.cheCluster, CheCluster: testCase.cheCluster,
ClusterAPI: deploy.ClusterAPI{ ClusterAPI: deploy.ClusterAPI{
@ -342,7 +334,7 @@ func TestReadProxyConfiguration(t *testing.T) {
}, },
} }
actualProxyConf, err := r.getProxyConfiguration(deployContext) actualProxyConf, err := GetProxyConfiguration(deployContext)
if err != nil { if err != nil {
t.Fatalf("Error reading proxy configuration: %v", err) t.Fatalf("Error reading proxy configuration: %v", err)
} }

View File

@ -87,6 +87,15 @@ func GetCurrentCheClusterInstances() map[client.ObjectKey]v2alpha1.CheCluster {
return ret return ret
} }
// CleanCheClusterInstancesForTest is a helper function for test code in other packages that needs
// to re-initialize the state of the checluster instance cache.
// It takes the cache lock so it is safe to call concurrently with the reconciler.
func CleanCheClusterInstancesForTest() {
	cheInstancesAccess.Lock()
	defer cheInstancesAccess.Unlock()

	currentCheInstances = map[client.ObjectKey]v2alpha1.CheCluster{}
}
// New returns a new instance of the Che manager reconciler. This is mainly useful for // New returns a new instance of the Che manager reconciler. This is mainly useful for
// testing because it doesn't set up any watches in the cluster, etc. For that use SetupWithManager. // testing because it doesn't set up any watches in the cluster, etc. For that use SetupWithManager.
func New(cl client.Client, scheme *runtime.Scheme) CheClusterReconciler { func New(cl client.Client, scheme *runtime.Scheme) CheClusterReconciler {
@ -148,13 +157,11 @@ func (r *CheClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error)
return ctrl.Result{}, r.finalize(ctx, current, currentV1) return ctrl.Result{}, r.finalize(ctx, current, currentV1)
} }
var disabledMessage string disabledMessage := ""
switch GetDevworkspaceState(r.scheme, current) {
if !r.scheme.IsGroupRegistered("controller.devfile.io") { case DevworkspaceStateNotPresent:
disabledMessage = "Devworkspace CRDs are not installed" disabledMessage = "Devworkspace CRDs are not installed"
} case DevworkspaceStateDisabled:
if disabledMessage == "" && !current.Spec.IsEnabled() {
disabledMessage = "Devworkspace Che is disabled" disabledMessage = "Devworkspace Che is disabled"
} }

View File

@ -67,7 +67,7 @@ func (e *RouteExposer) initFrom(ctx context.Context, cl client.Client, cluster *
if cluster.Spec.WorkspaceDomainEndpoints.TlsSecretName != "" { if cluster.Spec.WorkspaceDomainEndpoints.TlsSecretName != "" {
secret := &corev1.Secret{} secret := &corev1.Secret{}
err := cl.Get(ctx, client.ObjectKey{Name: cluster.Spec.TlsSecretName, Namespace: cluster.Namespace}, secret) err := cl.Get(ctx, client.ObjectKey{Name: cluster.Spec.WorkspaceDomainEndpoints.TlsSecretName, Namespace: cluster.Namespace}, secret)
if err != nil { if err != nil {
return err return err
} }
@ -94,7 +94,7 @@ func (e *IngressExposer) initFrom(ctx context.Context, cl client.Client, cluster
err := cl.Get(ctx, client.ObjectKey{Name: tlsSecretName, Namespace: routing.Namespace}, secret) err := cl.Get(ctx, client.ObjectKey{Name: tlsSecretName, Namespace: routing.Namespace}, secret)
if errors.IsNotFound(err) { if errors.IsNotFound(err) {
secret = &corev1.Secret{} secret = &corev1.Secret{}
err = cl.Get(ctx, client.ObjectKey{Name: cluster.Spec.TlsSecretName, Namespace: cluster.Namespace}, secret) err = cl.Get(ctx, client.ObjectKey{Name: cluster.Spec.WorkspaceDomainEndpoints.TlsSecretName, Namespace: cluster.Namespace}, secret)
if err != nil { if err != nil {
return err return err
} }

View File

@ -0,0 +1,38 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package devworkspace
import (
chev2alpha1 "github.com/eclipse-che/che-operator/api/v2alpha1"
"k8s.io/apimachinery/pkg/runtime"
)
// DevworkspaceState describes whether devworkspace support is available and
// enabled for a CheCluster.
type DevworkspaceState int

const (
	// DevworkspaceStateNotPresent means the devworkspace CRDs ("controller.devfile.io" group) are not installed in the cluster.
	DevworkspaceStateNotPresent DevworkspaceState = 0
	// DevworkspaceStateDisabled means the CRDs are installed but devworkspace support is disabled in the CheCluster spec.
	DevworkspaceStateDisabled DevworkspaceState = 1
	// DevworkspaceStateEnabled means the CRDs are installed and devworkspace support is enabled.
	DevworkspaceStateEnabled DevworkspaceState = 2
)
// GetDevworkspaceState reports whether devworkspace support can be used for
// the provided CheCluster: not-present when the "controller.devfile.io" CRDs
// are missing from the scheme, disabled when the spec turns it off, enabled
// otherwise.
func GetDevworkspaceState(scheme *runtime.Scheme, cr *chev2alpha1.CheCluster) DevworkspaceState {
	switch {
	case !scheme.IsGroupRegistered("controller.devfile.io"):
		return DevworkspaceStateNotPresent
	case !cr.Spec.IsEnabled():
		return DevworkspaceStateDisabled
	default:
		return DevworkspaceStateEnabled
	}
}

View File

@ -0,0 +1,404 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package usernamespace
import (
"context"
"github.com/devfile/devworkspace-operator/pkg/constants"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
org "github.com/eclipse-che/che-operator/api"
v1 "github.com/eclipse-che/che-operator/api/v1"
"github.com/eclipse-che/che-operator/api/v2alpha1"
"github.com/eclipse-che/che-operator/controllers/che"
"github.com/eclipse-che/che-operator/controllers/devworkspace"
"github.com/eclipse-che/che-operator/controllers/devworkspace/defaults"
"github.com/eclipse-che/che-operator/pkg/deploy"
projectv1 "github.com/openshift/api/project/v1"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
	// userSettingsComponentLabelValue is the "app.kubernetes.io/component" label
	// value that marks objects synced into user namespaces by this reconciler.
	userSettingsComponentLabelValue = "user-settings"
)

// CheUserNamespaceReconciler syncs the Che self-signed certificate, trusted CA
// certificates and proxy settings from the checluster namespace into the
// users' workspace namespaces.
type CheUserNamespaceReconciler struct {
	client         client.Client
	scheme         *runtime.Scheme
	namespaceCache namespaceCache
}

// Compile-time check that the reconciler satisfies reconcile.Reconciler.
var _ reconcile.Reconciler = (*CheUserNamespaceReconciler)(nil)
// NewReconciler returns a new CheUserNamespaceReconciler with an empty
// namespace cache. The client and scheme are wired in SetupWithManager.
func NewReconciler() *CheUserNamespaceReconciler {
	return &CheUserNamespaceReconciler{namespaceCache: *NewNamespaceCache()}
}
// SetupWithManager registers the reconciler with the manager. The primary
// watched resource is the namespace (a project on OpenShift); events on
// secrets, config maps and CheClusters are additionally translated into
// reconcile requests for the affected namespaces.
func (r *CheUserNamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error {
	r.scheme = mgr.GetScheme()
	r.client = mgr.GetClient()
	r.namespaceCache.client = r.client

	// on OpenShift the user namespaces are represented as projects
	var obj runtime.Object
	if infrastructure.IsOpenShift() {
		obj = &projectv1.Project{}
	} else {
		obj = &corev1.Namespace{}
	}

	ctx := context.Background()

	bld := ctrl.NewControllerManagedBy(mgr).
		For(obj).
		Watches(&source.Kind{Type: &corev1.Secret{}}, r.watchRulesForSecrets(ctx)).
		Watches(&source.Kind{Type: &corev1.ConfigMap{}}, r.watchRulesForConfigMaps(ctx)).
		Watches(&source.Kind{Type: &v1.CheCluster{}}, r.triggerAllNamespaces(ctx))

	return bld.Complete(r)
}
// watchRulesForSecrets maps secret events to reconcile requests for the
// secret's namespace. A request is enqueued when the secret is either a
// user-settings secret in a Che-managed namespace, or the Che
// self-signed-certificate secret co-located with a CheCluster.
func (r *CheUserNamespaceReconciler) watchRulesForSecrets(ctx context.Context) handler.EventHandler {
	return &handler.EnqueueRequestsFromMapFunc{
		ToRequests: handler.ToRequestsFunc(func(mo handler.MapObject) []reconcile.Request {
			return asReconcileRequestForNamespaceIf(mo.Meta, func() bool {
				return (isLabeledAsUserSettings(mo.Meta) && r.isInManagedNamespace(ctx, mo.Meta)) ||
					r.hasNameAndIsCollocatedWithCheCluster(ctx, mo.Meta, deploy.CheTLSSelfSignedCertificateSecretName)
			})
		}),
	}
}
// watchRulesForConfigMaps maps config-map events to reconcile requests for the
// config map's namespace. A request is enqueued when the config map is either
// a user-settings config map in a Che-managed namespace, or the Che
// "all CA certs" config map co-located with a CheCluster.
func (r *CheUserNamespaceReconciler) watchRulesForConfigMaps(ctx context.Context) handler.EventHandler {
	return &handler.EnqueueRequestsFromMapFunc{
		ToRequests: handler.ToRequestsFunc(func(mo handler.MapObject) []reconcile.Request {
			return asReconcileRequestForNamespaceIf(mo.Meta, func() bool {
				return (isLabeledAsUserSettings(mo.Meta) && r.isInManagedNamespace(ctx, mo.Meta)) ||
					r.hasNameAndIsCollocatedWithCheCluster(ctx, mo.Meta, deploy.CheAllCACertsConfigMapName)
			})
		}),
	}
}
// hasNameAndIsCollocatedWithCheCluster returns true when the object's name is
// one of the supplied names and a CheCluster lives in the object's namespace.
func (r *CheUserNamespaceReconciler) hasNameAndIsCollocatedWithCheCluster(ctx context.Context, obj metav1.Object, names ...string) bool {
	objName := obj.GetName()
	for _, candidate := range names {
		if objName != candidate {
			continue
		}
		if r.hasCheCluster(ctx, obj.GetNamespace()) {
			return true
		}
	}
	return false
}
// isLabeledAsUserSettings reports whether the object carries the
// "app.kubernetes.io/component: user-settings" label.
func isLabeledAsUserSettings(obj metav1.Object) bool {
	component, found := obj.GetLabels()["app.kubernetes.io/component"]
	return found && component == userSettingsComponentLabelValue
}
// isInManagedNamespace reports whether the object lives in a namespace that is
// known to the cache and labeled with a Che workspace owner uid.
func (r *CheUserNamespaceReconciler) isInManagedNamespace(ctx context.Context, obj metav1.Object) bool {
	info, err := r.namespaceCache.GetNamespaceInfo(ctx, obj.GetNamespace())
	if err != nil || info == nil {
		return false
	}
	return info.OwnerUid != ""
}
// triggerAllNamespaces returns an event handler that enqueues a reconcile
// request for every namespace currently known to the namespace cache. It is
// used to re-sync all user namespaces when a CheCluster changes.
func (r *CheUserNamespaceReconciler) triggerAllNamespaces(ctx context.Context) handler.EventHandler {
	return &handler.EnqueueRequestsFromMapFunc{
		ToRequests: handler.ToRequestsFunc(func(mo handler.MapObject) []reconcile.Request {
			nss := r.namespaceCache.GetAllKnownNamespaces()
			// BUGFIX: allocate with zero length and len(nss) capacity. The
			// original make([]reconcile.Request, len(nss)) + append produced a
			// slice of 2*len(nss) entries whose first half were zero-value
			// requests (empty namespace name).
			ret := make([]reconcile.Request, 0, len(nss))

			for _, ns := range nss {
				ret = append(ret, reconcile.Request{
					NamespacedName: types.NamespacedName{Name: ns},
				})
			}

			return ret
		}),
	}
}
// hasCheCluster reports whether at least one CheCluster exists in the given
// namespace. Listing errors are treated as "no cluster".
func (r *CheUserNamespaceReconciler) hasCheCluster(ctx context.Context, namespace string) bool {
	clusters := v1.CheClusterList{}
	err := r.client.List(ctx, &clusters, client.InNamespace(namespace))
	return err == nil && len(clusters.Items) > 0
}
// asReconcileRequestForNamespace wraps the object's namespace in a
// single-element reconcile request slice.
func asReconcileRequestForNamespace(obj metav1.Object) []reconcile.Request {
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{Name: obj.GetNamespace()},
	}
	return []reconcile.Request{req}
}
// asReconcileRequestForNamespaceIf returns a reconcile request for the
// object's namespace when the predicate holds, and an empty slice otherwise.
func asReconcileRequestForNamespaceIf(obj metav1.Object, predicate func() bool) []reconcile.Request {
	if !predicate() {
		return []reconcile.Request{}
	}
	return asReconcileRequestForNamespace(obj)
}
// Reconcile syncs the Che self-signed certificate, trusted CA certificates and
// proxy settings into the user (workspace) namespace named by req.Name.
// Namespaces without the Che owner-uid label are ignored; when the managing
// CheCluster cannot be determined yet the request is requeued.
func (r *CheUserNamespaceReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	ctx := context.Background()

	info, err := r.namespaceCache.ExamineNamespace(ctx, req.Name)
	if err != nil {
		logrus.Errorf("Failed to examine namespace %s for presence of Che user info labels: %v", req.Name, err)
		return ctrl.Result{}, err
	}

	if info == nil || info.OwnerUid == "" {
		// we're not handling this namespace
		return ctrl.Result{}, nil
	}

	checluster := findManagingCheCluster(*info.CheCluster)
	if checluster == nil {
		// the checluster instance cache may not be populated yet - retry later
		return ctrl.Result{Requeue: true}, nil
	}

	if devworkspace.GetDevworkspaceState(r.scheme, checluster) != devworkspace.DevworkspaceStateEnabled {
		return ctrl.Result{}, nil
	}

	// let's construct the deployContext to be able to use methods from v1 operator
	deployContext := &deploy.DeployContext{
		CheCluster: org.AsV1(checluster),
		ClusterAPI: deploy.ClusterAPI{
			Client:          r.client,
			NonCachedClient: r.client,
			DiscoveryClient: nil,
			Scheme:          r.scheme,
		},
	}

	if err = r.reconcileSelfSignedCert(ctx, deployContext, req.Name, checluster); err != nil {
		logrus.Errorf("Failed to reconcile self-signed certificate into namespace '%s': %v", req.Name, err)
		return ctrl.Result{}, err
	}

	// BUGFIX: this message previously said "self-signed certificate", a
	// copy-paste of the branch above, making failures indistinguishable.
	if err = r.reconcileTrustedCerts(ctx, deployContext, req.Name, checluster); err != nil {
		logrus.Errorf("Failed to reconcile trusted certificates into namespace '%s': %v", req.Name, err)
		return ctrl.Result{}, err
	}

	if err = r.reconcileProxySettings(ctx, req.Name, checluster, deployContext); err != nil {
		logrus.Errorf("Failed to reconcile proxy settings into namespace '%s': %v", req.Name, err)
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}
// findManagingCheCluster resolves the CheCluster identified by key from the
// shared instance cache. When exactly one instance exists, an empty key name
// selects it unconditionally; otherwise the key must match exactly. Returns
// nil when no suitable instance is found.
func findManagingCheCluster(key types.NamespacedName) *v2alpha1.CheCluster {
	instances := devworkspace.GetCurrentCheClusterInstances()

	switch len(instances) {
	case 0:
		return nil
	case 1:
		for k, v := range instances {
			// a single instance is also selected by an unspecified (empty) name
			if key.Name == "" || (k.Name == key.Name && k.Namespace == key.Namespace) {
				return &v
			}
			return nil
		}
		return nil
	default:
		// multiple instances - only an exact match will do
		if ret, found := instances[key]; found {
			return &ret
		}
		return nil
	}
}
// reconcileSelfSignedCert copies the "ca.crt" entry of the Che self-signed
// certificate secret from the checluster namespace into a secret in the
// target namespace, labeled and annotated so that the devworkspace operator
// mounts it as a file in workspace pods. If the source secret is missing or
// has no "ca.crt" key, a previously synced copy is deleted instead.
func (r *CheUserNamespaceReconciler) reconcileSelfSignedCert(ctx context.Context, deployContext *deploy.DeployContext, targetNs string, checluster *v2alpha1.CheCluster) error {
	targetCertName := prefixedName(checluster, "server-cert")

	// delSecret removes the previously synced copy from the target namespace, if any
	delSecret := func() error {
		_, err := deploy.Delete(deployContext, client.ObjectKey{Name: targetCertName, Namespace: targetNs}, &corev1.Secret{})
		return err
	}

	cheCert := &corev1.Secret{}
	if err := r.client.Get(ctx, client.ObjectKey{Name: deploy.CheTLSSelfSignedCertificateSecretName, Namespace: checluster.Namespace}, cheCert); err != nil {
		if !errors.IsNotFound(err) {
			return err
		}

		// There is not self-signed cert in the namespace of the checluster, so we have nothing to copy around
		return delSecret()
	}

	if _, ok := cheCert.Data["ca.crt"]; !ok {
		// the secret doesn't contain the certificate. bail out.
		return delSecret()
	}

	// only "ca.crt" is copied; other keys of the source secret are not synced
	targetCert := &corev1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      targetCertName,
			Namespace: targetNs,
			Labels: defaults.AddStandardLabelsForComponent(checluster, userSettingsComponentLabelValue, map[string]string{
				constants.DevWorkspaceMountLabel: "true",
			}),
			Annotations: map[string]string{
				constants.DevWorkspaceMountAsAnnotation:   "file",
				constants.DevWorkspaceMountPathAnnotation: "/tmp/che/secret/",
			},
		},
		Data: map[string][]byte{
			"ca.crt": cheCert.Data["ca.crt"],
		},
	}

	_, err := deploy.DoSync(deployContext, targetCert, deploy.SecretDiffOpts)
	return err
}
// reconcileTrustedCerts copies the Che "all CA certs" config map from the
// checluster namespace into the target namespace, labeled and annotated so
// that the devworkspace operator mounts the certificates under /public-certs
// in workspace pods. If the source config map is missing, a previously synced
// copy is deleted instead.
func (r *CheUserNamespaceReconciler) reconcileTrustedCerts(ctx context.Context, deployContext *deploy.DeployContext, targetNs string, checluster *v2alpha1.CheCluster) error {
	targetConfigMapName := prefixedName(checluster, "trusted-ca-certs")

	// delConfigMap removes the previously synced copy from the target
	// namespace, if any.
	// BUGFIX: the synced object is a ConfigMap - the original code passed
	// &corev1.Secret{} here, looking the stale copy up under the wrong kind
	// so it was never deleted.
	delConfigMap := func() error {
		_, err := deploy.Delete(deployContext, client.ObjectKey{Name: targetConfigMapName, Namespace: targetNs}, &corev1.ConfigMap{})
		return err
	}

	sourceMap := &corev1.ConfigMap{}
	if err := r.client.Get(ctx, client.ObjectKey{Name: deploy.CheAllCACertsConfigMapName, Namespace: checluster.Namespace}, sourceMap); err != nil {
		if !errors.IsNotFound(err) {
			return err
		}

		// no trusted certs in the checluster namespace - clean up our copy
		return delConfigMap()
	}

	targetMap := &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ConfigMap",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      targetConfigMapName,
			Namespace: targetNs,
			Labels: defaults.AddStandardLabelsForComponent(checluster, userSettingsComponentLabelValue, map[string]string{
				constants.DevWorkspaceMountLabel: "true",
			}),
			Annotations: addToFirst(sourceMap.Annotations, map[string]string{
				constants.DevWorkspaceMountAsAnnotation:   "file",
				constants.DevWorkspaceMountPathAnnotation: "/public-certs",
			}),
		},
		Data: sourceMap.Data,
	}

	_, err := deploy.DoSync(deployContext, targetMap, deploy.ConfigMapDiffOpts)
	return err
}
// addToFirst returns the union of the two maps, with entries from second
// overriding entries from first on key collisions. A fresh map is always
// returned so that neither argument is mutated - the caller passes the
// annotations map of an object obtained from the (possibly cached) client,
// which must not be modified in place.
func addToFirst(first map[string]string, second map[string]string) map[string]string {
	merged := make(map[string]string, len(first)+len(second))
	for k, v := range first {
		merged[k] = v
	}
	for k, v := range second {
		merged[k] = v
	}
	return merged
}
// reconcileProxySettings mirrors the proxy configuration of the Che
// installation into a "<prefix>-proxy-settings" config map in the target
// namespace, annotated so that the devworkspace operator exposes its entries
// as environment variables in workspace pods. When all proxy values are
// empty, a previously synced config map is deleted.
func (r *CheUserNamespaceReconciler) reconcileProxySettings(ctx context.Context, targetNs string, checluster *v2alpha1.CheCluster, deployContext *deploy.DeployContext) error {
	proxyConfig, err := che.GetProxyConfiguration(deployContext)
	if err != nil {
		return err
	}

	if proxyConfig == nil {
		// no proxy configuration detected - nothing to sync
		return nil
	}

	// collect only the non-empty values as the standard proxy env vars
	proxySettings := map[string]string{}
	if proxyConfig.HttpProxy != "" {
		proxySettings["HTTP_PROXY"] = proxyConfig.HttpProxy
	}
	if proxyConfig.HttpsProxy != "" {
		proxySettings["HTTPS_PROXY"] = proxyConfig.HttpsProxy
	}
	if proxyConfig.NoProxy != "" {
		proxySettings["NO_PROXY"] = proxyConfig.NoProxy
	}

	key := client.ObjectKey{Name: prefixedName(checluster, "proxy-settings"), Namespace: targetNs}
	cfg := &corev1.ConfigMap{}
	exists := true
	if err := r.client.Get(ctx, key, cfg); err != nil {
		if errors.IsNotFound(err) {
			exists = false
		} else {
			return err
		}
	}

	if len(proxySettings) == 0 {
		// nothing to propagate - remove the previously synced copy, if any
		if exists {
			if err := r.client.Delete(ctx, cfg); err != nil {
				return err
			}
		}
		return nil
	}

	requiredLabels := defaults.AddStandardLabelsForComponent(checluster, userSettingsComponentLabelValue, map[string]string{
		constants.DevWorkspaceMountLabel: "true",
	})
	// "env" makes the devworkspace operator mount the data as environment variables
	requiredAnnos := map[string]string{
		constants.DevWorkspaceMountAsAnnotation: "env",
	}

	cfg = &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ConfigMap",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:        prefixedName(checluster, "proxy-settings"),
			Namespace:   targetNs,
			Labels:      requiredLabels,
			Annotations: requiredAnnos,
		},
		Data: proxySettings,
	}

	_, err = deploy.DoSync(deployContext, cfg, deploy.ConfigMapDiffOpts)
	return err
}
// prefixedName builds the name of an object synced into a user namespace:
// "<checluster-name>-<checluster-namespace>-<name>".
func prefixedName(checluster *v2alpha1.CheCluster, name string) string {
	prefix := checluster.Name + "-" + checluster.Namespace
	return prefix + "-" + name
}

View File

@ -0,0 +1,330 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package usernamespace
import (
"context"
"sync"
"testing"
"github.com/devfile/devworkspace-operator/pkg/constants"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
v1 "github.com/eclipse-che/che-operator/api/v1"
"github.com/eclipse-che/che-operator/controllers/devworkspace"
"github.com/eclipse-che/che-operator/pkg/deploy"
"github.com/eclipse-che/che-operator/pkg/util"
configv1 "github.com/openshift/api/config/v1"
projectv1 "github.com/openshift/api/project/v1"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// setupCheCluster creates, in the fake cluster, the Che namespace (a project
// on OpenShift), a CheCluster with devworkspace support enabled, the
// self-signed-certificate secret and the trusted CA certs config map, and
// then runs the devworkspace checluster reconciler twice so that its instance
// cache is populated.
func setupCheCluster(t *testing.T, ctx context.Context, cl client.Client, scheme *runtime.Scheme, cheNamespaceName string, cheName string) {
	var cheNamespace metav1.Object
	if infrastructure.IsOpenShift() {
		cheNamespace = &projectv1.Project{}
	} else {
		cheNamespace = &corev1.Namespace{}
	}

	cheNamespace.SetName(cheNamespaceName)
	if err := cl.Create(ctx, cheNamespace.(runtime.Object)); err != nil {
		t.Fatal(err)
	}

	cheCluster := v1.CheCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      cheName,
			Namespace: cheNamespaceName,
		},
		Spec: v1.CheClusterSpec{
			Server: v1.CheClusterSpecServer{
				CheHost: "che-host",
				CustomCheProperties: map[string]string{
					"CHE_INFRA_OPENSHIFT_ROUTE_HOST_DOMAIN__SUFFIX": "root-domain",
				},
			},
			DevWorkspace: v1.CheClusterSpecDevWorkspace{
				Enable: true,
			},
			K8s: v1.CheClusterSpecK8SOnly{
				IngressDomain: "root-domain",
			},
		},
	}
	if err := cl.Create(ctx, &cheCluster); err != nil {
		t.Fatal(err)
	}

	// also create the self-signed-certificate secret to pretend we have TLS set up
	cert := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      deploy.CheTLSSelfSignedCertificateSecretName,
			Namespace: cheNamespaceName,
		},
		Data: map[string][]byte{
			"ca.crt":     []byte("my certificate"),
			"other.data": []byte("should not be copied to target ns"),
		},
	}
	if err := cl.Create(ctx, cert); err != nil {
		t.Fatal(err)
	}

	caCerts := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      deploy.CheAllCACertsConfigMapName,
			Namespace: cheNamespaceName,
		},
		Data: map[string]string{
			"trusted1": "trusted cert 1",
			"trusted2": "trusted cert 2",
		},
	}
	if err := cl.Create(ctx, caCerts); err != nil {
		t.Fatal(err)
	}

	r := devworkspace.New(cl, scheme)

	// the reconciliation needs to run twice for it to be truly finished - we're setting up finalizers etc...
	if _, err := r.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: cheName, Namespace: cheNamespaceName}}); err != nil {
		t.Fatal(err)
	}
	if _, err := r.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: cheName, Namespace: cheNamespaceName}}); err != nil {
		t.Fatal(err)
	}
}
// setup initializes the fake infrastructure type, resets the shared
// checluster instance cache, and returns a test scheme, a fake client
// pre-loaded with objs, and a CheUserNamespaceReconciler wired to both.
func setup(infraType infrastructure.Type, objs ...runtime.Object) (*runtime.Scheme, client.Client, *CheUserNamespaceReconciler) {
	infrastructure.InitializeForTesting(infraType)

	// reset package-level state shared between tests
	devworkspace.CleanCheClusterInstancesForTest()

	util.IsOpenShift = infraType == infrastructure.OpenShiftv4
	util.IsOpenShift4 = infraType == infrastructure.OpenShiftv4

	scheme := createTestScheme()

	cl := fake.NewFakeClientWithScheme(scheme, objs...)

	r := &CheUserNamespaceReconciler{
		client: cl,
		scheme: scheme,
		namespaceCache: namespaceCache{
			client:          cl,
			knownNamespaces: map[string]namespaceInfo{},
			lock:            sync.Mutex{},
		},
	}

	return scheme, cl, r
}
// TestSkipsUnlabeledNamespaces verifies that reconciling a namespace that
// lacks the Che owner-uid label syncs nothing into it: no secrets and no
// config maps appear in that namespace. Runs on both Kubernetes and OpenShift.
func TestSkipsUnlabeledNamespaces(t *testing.T) {
	test := func(t *testing.T, infraType infrastructure.Type, namespace metav1.Object) {
		ctx := context.TODO()
		scheme, cl, r := setup(infraType, namespace.(runtime.Object))
		setupCheCluster(t, ctx, cl, scheme, "che", "che")

		if _, err := r.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: namespace.GetName()}}); err != nil {
			t.Fatal(err)
		}

		// no new secret or configmap should be created in the namespace
		ss := &corev1.SecretList{}
		if err := cl.List(ctx, ss, client.InNamespace(namespace.GetName())); err != nil {
			t.Fatal(err)
		}
		assert.True(t, len(ss.Items) == 0, "No secrets expected in the tested namespace but found %d", len(ss.Items))

		cs := &corev1.ConfigMapList{}
		if err := cl.List(ctx, cs, client.InNamespace(namespace.GetName())); err != nil {
			t.Fatal(err)
		}
		assert.True(t, len(cs.Items) == 0, "No configmaps expected in the tested namespace but found %d", len(cs.Items))
	}

	t.Run("k8s", func(t *testing.T) {
		test(t, infrastructure.Kubernetes, &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: "ns",
			},
		})
	})

	t.Run("openshift", func(t *testing.T) {
		test(t, infrastructure.OpenShiftv4, &projectv1.Project{
			ObjectMeta: metav1.ObjectMeta{
				Name: "prj",
			},
		})
	})
}
func TestRequiresLabelsToMatchOneOfMultipleCheCluster(t *testing.T) {
	// With two CheClusters deployed and only the owner-uid label on the
	// namespace, the reconciler cannot decide which cluster the namespace
	// belongs to and must ask for a requeue.
	runTest := func(t *testing.T, infra infrastructure.Type, ns metav1.Object) {
		ctx := context.TODO()
		scheme, cl, reconciler := setup(infra, ns.(runtime.Object))
		setupCheCluster(t, ctx, cl, scheme, "che1", "che")
		setupCheCluster(t, ctx, cl, scheme, "che2", "che")

		result, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.GetName()}})
		assert.NoError(t, err, "Reconciliation should have succeeded.")
		assert.True(t, result.Requeue, "The reconciliation request should have been requeued.")
	}

	t.Run("k8s", func(t *testing.T) {
		runTest(t, infrastructure.Kubernetes, &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: "ns",
				Labels: map[string]string{
					workspaceNamespaceOwnerUidLabel: "uid",
				},
			},
		})
	})

	t.Run("openshift", func(t *testing.T) {
		runTest(t, infrastructure.OpenShiftv4, &projectv1.Project{
			ObjectMeta: metav1.ObjectMeta{
				Name: "prj",
				Labels: map[string]string{
					workspaceNamespaceOwnerUidLabel: "uid",
				},
			},
		})
	})
}
func TestMatchingCheClusterCanBeSelectedUsingLabels(t *testing.T) {
	// When the namespace labels name a concrete CheCluster (name + namespace),
	// the reconciler can resolve the target even though more than one
	// CheCluster exists, so no requeue is requested.
	runTest := func(t *testing.T, infra infrastructure.Type, ns metav1.Object) {
		ctx := context.TODO()
		scheme, cl, reconciler := setup(infra, ns.(runtime.Object))
		setupCheCluster(t, ctx, cl, scheme, "che1", "che")
		setupCheCluster(t, ctx, cl, scheme, "che2", "che")

		result, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.GetName()}})
		assert.NoError(t, err, "Reconciliation shouldn't have failed")
		assert.False(t, result.Requeue, "The reconciliation request should have succeeded but is requesting a requeue.")
	}

	t.Run("k8s", func(t *testing.T) {
		runTest(t, infrastructure.Kubernetes, &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: "ns",
				Labels: map[string]string{
					workspaceNamespaceOwnerUidLabel: "uid",
					cheNameLabel:                    "che",
					cheNamespaceLabel:               "che1",
				},
			},
		})
	})

	t.Run("openshift", func(t *testing.T) {
		runTest(t, infrastructure.OpenShiftv4, &projectv1.Project{
			ObjectMeta: metav1.ObjectMeta{
				Name: "prj",
				Labels: map[string]string{
					workspaceNamespaceOwnerUidLabel: "uid",
					cheNameLabel:                    "che",
					cheNamespaceLabel:               "che1",
				},
			},
		})
	})
}
func TestCreatesDataInNamespace(t *testing.T) {
	// Verifies that reconciling a labeled workspace namespace copies the proxy
	// settings, the self-signed server cert and the trusted CA certs into it,
	// annotated/labeled so the devworkspace operator mounts them.
	runTest := func(t *testing.T, infra infrastructure.Type, ns metav1.Object, objs ...runtime.Object) {
		ctx := context.TODO()
		allObjs := append(objs, ns.(runtime.Object))
		scheme, cl, reconciler := setup(infra, allObjs...)
		setupCheCluster(t, ctx, cl, scheme, "eclipse-che", "che")

		result, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.GetName()}})
		assert.NoError(t, err, "Reconciliation should have succeeded")
		assert.False(t, result.Requeue, "The reconciliation request should have succeeded but it is requesting a requeue")

		// proxy settings - mounted as environment variables
		proxySettings := corev1.ConfigMap{}
		assert.NoError(t, cl.Get(ctx, client.ObjectKey{Name: "che-eclipse-che-proxy-settings", Namespace: ns.GetName()}, &proxySettings))
		assert.Equal(t, "env", proxySettings.GetAnnotations()[constants.DevWorkspaceMountAsAnnotation],
			"proxy settings should be annotated as mount as 'env'")
		assert.Equal(t, "true", proxySettings.GetLabels()[constants.DevWorkspaceMountLabel],
			"proxy settings should be labeled as mounted")
		assert.Len(t, proxySettings.Data, 1, "Expecting just 1 element in the default proxy settings")
		assert.Equal(t, ".svc", proxySettings.Data["NO_PROXY"], "Unexpected proxy settings")

		// self-signed server certificate - mounted as a file
		cert := corev1.Secret{}
		assert.NoError(t, cl.Get(ctx, client.ObjectKey{Name: "che-eclipse-che-server-cert", Namespace: ns.GetName()}, &cert))
		assert.Equal(t, "file", cert.GetAnnotations()[constants.DevWorkspaceMountAsAnnotation], "server cert should be annotated as mount as 'file'")
		assert.Equal(t, "/tmp/che/secret/", cert.GetAnnotations()[constants.DevWorkspaceMountPathAnnotation], "server cert annotated as mounted to an unexpected path")
		assert.Equal(t, "true", cert.GetLabels()[constants.DevWorkspaceMountLabel], "server cert should be labeled as mounted")
		assert.Len(t, cert.Data, 1, "Expecting just 1 element in the self-signed cert")
		assert.Equal(t, "my certificate", string(cert.Data["ca.crt"]), "Unexpected self-signed certificate")

		// trusted CA certificates - mounted as files
		caCerts := corev1.ConfigMap{}
		assert.NoError(t, cl.Get(ctx, client.ObjectKey{Name: "che-eclipse-che-trusted-ca-certs", Namespace: ns.GetName()}, &caCerts))
		assert.Equal(t, "file", caCerts.GetAnnotations()[constants.DevWorkspaceMountAsAnnotation], "trusted certs should be annotated as mount as 'file'")
		assert.Equal(t, "/public-certs", caCerts.GetAnnotations()[constants.DevWorkspaceMountPathAnnotation], "trusted certs annotated as mounted to an unexpected path")
		assert.Equal(t, "true", caCerts.GetLabels()[constants.DevWorkspaceMountLabel], "trusted certs should be labeled as mounted")
		assert.Len(t, caCerts.Data, 2, "Expecting exactly 2 data entries in the trusted cert config map")
		assert.Equal(t, "trusted cert 1", string(caCerts.Data["trusted1"]), "Unexpected trusted cert 1 value")
		assert.Equal(t, "trusted cert 2", string(caCerts.Data["trusted2"]), "Unexpected trusted cert 2 value")
	}

	t.Run("k8s", func(t *testing.T) {
		runTest(t, infrastructure.Kubernetes, &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: "ns",
				Labels: map[string]string{
					workspaceNamespaceOwnerUidLabel: "uid",
				},
			},
		})
	})

	t.Run("openshift", func(t *testing.T) {
		// on OpenShift the proxy settings come from the cluster-wide Proxy config
		runTest(t, infrastructure.OpenShiftv4, &projectv1.Project{
			ObjectMeta: metav1.ObjectMeta{
				Name: "prj",
				Labels: map[string]string{
					workspaceNamespaceOwnerUidLabel: "uid",
				},
			},
		}, &configv1.Proxy{
			ObjectMeta: metav1.ObjectMeta{
				Name: "cluster",
			},
			Spec: configv1.ProxySpec{
				NoProxy: ".svc",
			},
			Status: configv1.ProxyStatus{
				NoProxy: ".svc",
			},
		})
	})
}

View File

@ -0,0 +1,133 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package usernamespace
import (
"context"
"sync"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
projectv1 "github.com/openshift/api/project/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
	// workspaceNamespaceOwnerUidLabel marks a namespace (or project) as a user's
	// workspace namespace; only namespaces carrying this label are processed.
	workspaceNamespaceOwnerUidLabel string = "che.eclipse.org/workspace-namespace-owner-uid"
	// cheNameLabel selects the name of the CheCluster the namespace belongs to.
	cheNameLabel string = "che.eclipse.org/che-name"
	// cheNamespaceLabel selects the namespace of the CheCluster the namespace
	// belongs to.
	cheNamespaceLabel string = "che.eclipse.org/che-namespace"
)
// namespaceCache is a mutex-guarded cache of information extracted from
// workspace namespaces (or OpenShift projects), keyed by namespace name.
type namespaceCache struct {
	// client is used to read namespaces/projects from the cluster.
	client client.Client
	// knownNamespaces maps a namespace name to the info last read from it.
	knownNamespaces map[string]namespaceInfo
	// lock guards knownNamespaces; because of it the cache must not be copied.
	lock sync.Mutex
}
// namespaceInfo carries the data extracted from a workspace namespace's labels.
type namespaceInfo struct {
	// OwnerUid is the value of the workspaceNamespaceOwnerUidLabel label
	// (empty when the label is absent).
	OwnerUid string
	// CheCluster identifies the CheCluster the namespace belongs to, built from
	// the cheNameLabel and cheNamespaceLabel labels (fields are empty strings
	// when the labels are absent).
	CheCluster *types.NamespacedName
}
// NewNamespaceCache constructs an empty namespace cache. Note that the client
// field is not set by this constructor.
func NewNamespaceCache() *namespaceCache {
	cache := namespaceCache{
		knownNamespaces: map[string]namespaceInfo{},
		lock:            sync.Mutex{},
	}
	return &cache
}
// GetNamespaceInfo returns the cached information about the provided namespace,
// examining the cluster on a cache miss. Returns nil info (with a nil error)
// when the namespace does not exist or is being deleted.
func (c *namespaceCache) GetNamespaceInfo(ctx context.Context, namespace string) (*namespaceInfo, error) {
	c.lock.Lock()
	defer c.lock.Unlock()

	if val, contains := c.knownNamespaces[namespace]; contains {
		return &val, nil
	}

	// cache miss - examine the cluster; examineNamespaceUnsafe both returns the
	// freshly computed info and stores it in the cache, so it can be returned
	// directly (no need to loop back and re-read the cache).
	return c.examineNamespaceUnsafe(ctx, namespace)
}
// ExamineNamespace unconditionally re-reads the namespace (or project) called
// ns from the cluster and refreshes its cache entry, bypassing any previously
// cached value. Returns nil info (with a nil error) when the namespace does not
// exist or is being deleted.
func (c *namespaceCache) ExamineNamespace(ctx context.Context, ns string) (*namespaceInfo, error) {
	c.lock.Lock()
	defer c.lock.Unlock()
	return c.examineNamespaceUnsafe(ctx, ns)
}
// GetAllKnownNamespaces returns the names of all namespaces currently present
// in the cache.
//
// Bug fix: the slice was previously created with make([]string, len(...)) and
// then appended to, which produced a result of double length whose first half
// was empty strings. It must be created with zero length and the map size as
// capacity.
func (c *namespaceCache) GetAllKnownNamespaces() []string {
	c.lock.Lock()
	defer c.lock.Unlock()

	ret := make([]string, 0, len(c.knownNamespaces))
	for k := range c.knownNamespaces {
		ret = append(ret, k)
	}

	return ret
}
// examineNamespaceUnsafe reads the namespace (or, on OpenShift, the project)
// called ns from the cluster and updates its cache entry from the object's
// labels. The caller MUST hold c.lock (hence "unsafe").
//
// Returns nil info (with a nil error) when the object is not found or carries a
// deletion timestamp; in both cases any stale cache entry is removed.
func (c *namespaceCache) examineNamespaceUnsafe(ctx context.Context, ns string) (*namespaceInfo, error) {
	// an OpenShift project is the user-facing representation of a namespace,
	// so look up the appropriate kind for the current infrastructure
	var obj runtime.Object
	if infrastructure.IsOpenShift() {
		obj = &projectv1.Project{}
	} else {
		obj = &corev1.Namespace{}
	}

	if err := c.client.Get(ctx, client.ObjectKey{Name: ns}, obj); err != nil {
		if errors.IsNotFound(err) {
			// the namespace is gone - evict any stale cache entry
			delete(c.knownNamespaces, ns)
			return nil, nil
		}
		return nil, err
	}

	var namespace = obj.(metav1.Object)

	if namespace.GetDeletionTimestamp() != nil {
		// the namespace is being deleted - treat it the same as not found
		delete(c.knownNamespaces, ns)
		return nil, nil
	}

	labels := namespace.GetLabels()
	if labels == nil {
		labels = map[string]string{}
	}

	// missing labels simply yield empty strings in the resulting info
	ownerUid := labels[workspaceNamespaceOwnerUidLabel]
	cheName := labels[cheNameLabel]
	cheNamespace := labels[cheNamespaceLabel]

	ret := namespaceInfo{
		OwnerUid: ownerUid,
		CheCluster: &types.NamespacedName{
			Name:      cheName,
			Namespace: cheNamespace,
		},
	}

	// cache the result before handing it out
	c.knownNamespaces[ns] = ret

	return &ret, nil
}

View File

@ -0,0 +1,136 @@
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//
package usernamespace
import (
"context"
"sync"
"testing"
dwo "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
"github.com/devfile/devworkspace-operator/pkg/infrastructure"
v1 "github.com/eclipse-che/che-operator/api/v1"
"github.com/stretchr/testify/assert"
projectv1 "github.com/openshift/api/project/v1"
routev1 "github.com/openshift/api/route/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/api/node/v1alpha1"
rbac "k8s.io/api/rbac/v1"
configv1 "github.com/openshift/api/config/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
// createTestScheme builds a runtime scheme with every API group the tests in
// this package touch registered on it.
func createTestScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()

	installers := []func(*runtime.Scheme) error{
		v1alpha1.AddToScheme,
		extensions.AddToScheme,
		corev1.AddToScheme,
		appsv1.AddToScheme,
		rbac.AddToScheme,
		routev1.AddToScheme,
		v1.AddToScheme,
		dwo.AddToScheme,
		projectv1.AddToScheme,
		configv1.AddToScheme,
	}
	for _, install := range installers {
		utilruntime.Must(install(scheme))
	}

	return scheme
}
func TestGetNamespaceInfoReadsFromCache(t *testing.T) {
	// GetNamespaceInfo must populate the cache on first access.
	runTest := func(infra infrastructure.Type, obj metav1.Object) {
		infrastructure.InitializeForTesting(infra)
		ctx := context.TODO()
		name := obj.GetName()

		cl := fake.NewFakeClientWithScheme(createTestScheme(), obj.(runtime.Object))
		cache := namespaceCache{
			client:          cl,
			knownNamespaces: map[string]namespaceInfo{},
			lock:            sync.Mutex{},
		}

		_, err := cache.GetNamespaceInfo(ctx, name)
		assert.NoError(t, err)
		assert.Contains(t, cache.knownNamespaces, name, "The namespace info should have been cached")
	}

	runTest(infrastructure.Kubernetes, &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: "ns",
		},
	})
	runTest(infrastructure.OpenShiftv4, &projectv1.Project{
		ObjectMeta: metav1.ObjectMeta{
			Name: "prj",
		},
	})
}
func TestExamineUpdatesCache(t *testing.T) {
	// ExamineNamespace must refresh a cache entry that became stale after the
	// namespace was relabeled behind the cache's back.
	runTest := func(infra infrastructure.Type, obj metav1.Object) {
		ctx := context.TODO()
		name := obj.GetName()
		cl := fake.NewFakeClientWithScheme(createTestScheme(), obj.(runtime.Object))
		infrastructure.InitializeForTesting(infra)

		cache := namespaceCache{
			client:          cl,
			knownNamespaces: map[string]namespaceInfo{},
			lock:            sync.Mutex{},
		}

		// first read: no labels yet, so the owner UID is empty but cached
		info, err := cache.GetNamespaceInfo(ctx, name)
		assert.NoError(t, err)
		assert.Empty(t, info.OwnerUid, "Detected owner UID should be empty")
		assert.Contains(t, cache.knownNamespaces, name, "The namespace info should have been cached")

		// relabel the namespace without the cache knowing about it
		updated := obj.(runtime.Object).DeepCopyObject()
		assert.NoError(t, cl.Get(ctx, client.ObjectKey{Name: name}, updated))
		updated.(metav1.Object).SetLabels(map[string]string{
			workspaceNamespaceOwnerUidLabel: "uid",
		})
		assert.NoError(t, cl.Update(ctx, updated))

		// an explicit examination must pick up the new label
		info, err = cache.ExamineNamespace(ctx, name)
		assert.NoError(t, err)
		assert.Equal(t, "uid", info.OwnerUid, "unexpected detected owner UID")
	}

	runTest(infrastructure.Kubernetes, &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: "ns",
		},
	})
	runTest(infrastructure.OpenShiftv4, &projectv1.Project{
		ObjectMeta: metav1.ObjectMeta{
			Name: "prj",
		},
	})
}

15
main.go
View File

@ -47,6 +47,7 @@ import (
restorecontroller "github.com/eclipse-che/che-operator/controllers/checlusterrestore" restorecontroller "github.com/eclipse-che/che-operator/controllers/checlusterrestore"
"github.com/eclipse-che/che-operator/controllers/devworkspace" "github.com/eclipse-che/che-operator/controllers/devworkspace"
"github.com/eclipse-che/che-operator/controllers/devworkspace/solver" "github.com/eclipse-che/che-operator/controllers/devworkspace/solver"
"github.com/eclipse-che/che-operator/controllers/usernamespace"
"github.com/eclipse-che/che-operator/pkg/deploy" "github.com/eclipse-che/che-operator/pkg/deploy"
"github.com/eclipse-che/che-operator/pkg/signal" "github.com/eclipse-che/che-operator/pkg/signal"
"github.com/eclipse-che/che-operator/pkg/util" "github.com/eclipse-che/che-operator/pkg/util"
@ -62,6 +63,7 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
image_puller_api "github.com/che-incubator/kubernetes-image-puller-operator/pkg/apis" image_puller_api "github.com/che-incubator/kubernetes-image-puller-operator/pkg/apis"
projectv1 "github.com/openshift/api/project/v1"
routev1 "github.com/openshift/api/route/v1" routev1 "github.com/openshift/api/route/v1"
userv1 "github.com/openshift/api/user/v1" userv1 "github.com/openshift/api/user/v1"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
@ -130,6 +132,7 @@ func init() {
utilruntime.Must(configv1.AddToScheme(scheme)) utilruntime.Must(configv1.AddToScheme(scheme))
utilruntime.Must(corev1.AddToScheme(scheme)) utilruntime.Must(corev1.AddToScheme(scheme))
utilruntime.Must(consolev1.AddToScheme(scheme)) utilruntime.Must(consolev1.AddToScheme(scheme))
utilruntime.Must(projectv1.AddToScheme(scheme))
} }
} }
@ -304,8 +307,20 @@ func enableDevworkspaceSupport(mgr manager.Manager) error {
SolverGetter: solver.Getter(mgr.GetScheme()), SolverGetter: solver.Getter(mgr.GetScheme()),
} }
if err := routing.SetupWithManager(mgr); err != nil { if err := routing.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to set up controller", "controller", "DevWorkspaceRouting")
return err return err
} }
userNamespaceReconciler := usernamespace.NewReconciler()
if err = userNamespaceReconciler.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to set up controller", "controller", "CheUserReconciler")
return err
}
setupLog.Info("Devworkspace support enabled")
} else {
setupLog.Info("Devworkspace support disabled")
} }
return nil return nil

View File

@ -20,7 +20,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )
var configMapDiffOpts = cmp.Options{ var ConfigMapDiffOpts = cmp.Options{
cmpopts.IgnoreFields(corev1.ConfigMap{}, "TypeMeta"), cmpopts.IgnoreFields(corev1.ConfigMap{}, "TypeMeta"),
cmp.Comparer(func(x, y metav1.ObjectMeta) bool { cmp.Comparer(func(x, y metav1.ObjectMeta) bool {
return reflect.DeepEqual(x.Labels, y.Labels) return reflect.DeepEqual(x.Labels, y.Labels)
@ -34,14 +34,14 @@ func SyncConfigMapDataToCluster(
component string) (bool, error) { component string) (bool, error) {
configMapSpec := GetConfigMapSpec(deployContext, name, data, component) configMapSpec := GetConfigMapSpec(deployContext, name, data, component)
return Sync(deployContext, configMapSpec, configMapDiffOpts) return Sync(deployContext, configMapSpec, ConfigMapDiffOpts)
} }
func SyncConfigMapSpecToCluster( func SyncConfigMapSpecToCluster(
deployContext *DeployContext, deployContext *DeployContext,
configMapSpec *corev1.ConfigMap) (bool, error) { configMapSpec *corev1.ConfigMap) (bool, error) {
return Sync(deployContext, configMapSpec, configMapDiffOpts) return Sync(deployContext, configMapSpec, ConfigMapDiffOpts)
} }
func GetConfigMapSpec( func GetConfigMapSpec(

View File

@ -27,7 +27,7 @@ import (
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
) )
var secretDiffOpts = cmp.Options{ var SecretDiffOpts = cmp.Options{
cmpopts.IgnoreFields(corev1.Secret{}, "TypeMeta", "ObjectMeta"), cmpopts.IgnoreFields(corev1.Secret{}, "TypeMeta", "ObjectMeta"),
} }
@ -39,7 +39,7 @@ func SyncSecretToCluster(
data map[string][]byte) (bool, error) { data map[string][]byte) (bool, error) {
secretSpec := GetSecretSpec(deployContext, name, namespace, data) secretSpec := GetSecretSpec(deployContext, name, namespace, data)
return Sync(deployContext, secretSpec, secretDiffOpts) return Sync(deployContext, secretSpec, SecretDiffOpts)
} }
// Get all secrets by labels and annotations // Get all secrets by labels and annotations

View File

@ -28,6 +28,10 @@ import (
// Sync syncs the blueprint to the cluster in a generic (as much as Go allows) manner. // Sync syncs the blueprint to the cluster in a generic (as much as Go allows) manner.
// Returns true if object is up to date otherwiser returns false // Returns true if object is up to date otherwiser returns false
//
// WARNING: For legacy reasons, this method bails out quickly without doing anything if the CheCluster resource
// is being deleted (it does this by examining the deployContext, not the cluster). If you don't want
// this behavior, use the DoSync method.
func Sync(deployContext *DeployContext, blueprint metav1.Object, diffOpts ...cmp.Option) (bool, error) { func Sync(deployContext *DeployContext, blueprint metav1.Object, diffOpts ...cmp.Option) (bool, error) {
// eclipse-che custom resource is being deleted, we shouldn't sync // eclipse-che custom resource is being deleted, we shouldn't sync
// TODO move this check before `Sync` invocation // TODO move this check before `Sync` invocation
@ -35,6 +39,12 @@ func Sync(deployContext *DeployContext, blueprint metav1.Object, diffOpts ...cmp
return true, nil return true, nil
} }
return DoSync(deployContext, blueprint, diffOpts...)
}
// DoSync syncs the blueprint to the cluster in a generic (as much as Go allows) manner.
// Returns true if the object is up to date, otherwise returns false.
func DoSync(deployContext *DeployContext, blueprint metav1.Object, diffOpts ...cmp.Option) (bool, error) {
runtimeObject, ok := blueprint.(runtime.Object) runtimeObject, ok := blueprint.(runtime.Object)
if !ok { if !ok {
return false, fmt.Errorf("object %T is not a runtime.Object. Cannot sync it", runtimeObject) return false, fmt.Errorf("object %T is not a runtime.Object. Cannot sync it", runtimeObject)

8
vendor/github.com/openshift/api/project/v1/doc.go generated vendored Normal file
View File

@ -0,0 +1,8 @@
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/origin/pkg/project/apis/project
// +k8s:defaulter-gen=TypeMeta
// +k8s:openapi-gen=true
// +groupName=project.openshift.io
// Package v1 is the v1 version of the API.
package v1

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,77 @@
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2';
package github.com.openshift.api.project.v1;
import "k8s.io/api/core/v1/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1";
// Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members,
// a quota on the resources that the project may consume, and the security controls on the resources in
// the project. Within a project, members may have different roles - project administrators can set
// membership, editors can create and manage the resources, and viewers can see but not access running
// containers. In a normal cluster project administrators are not able to alter their quotas - that is
// restricted to cluster administrators.
//
// Listing or watching projects will return only projects the user has the reader role on.
//
// An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed
// as editable to end users while namespaces are not. Direct creation of a project is typically restricted
// to administrators, while end users should use the requestproject resource.
message Project {
// Standard object's metadata.
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec defines the behavior of the Namespace.
optional ProjectSpec spec = 2;
// Status describes the current status of a Namespace
optional ProjectStatus status = 3;
}
// ProjectList is a list of Project objects.
message ProjectList {
// Standard object's metadata.
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is the list of projects
repeated Project items = 2;
}
// ProjecRequest is the set of options necessary to fully qualify a project request
message ProjectRequest {
// Standard object's metadata.
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// DisplayName is the display name to apply to a project
optional string displayName = 2;
// Description is the description to apply to a project
optional string description = 3;
}
// ProjectSpec describes the attributes on a Project
message ProjectSpec {
// Finalizers is an opaque list of values that must be empty to permanently remove object from storage
repeated string finalizers = 1;
}
// ProjectStatus is information about the current status of a Project
message ProjectStatus {
// Phase is the current lifecycle phase of the project
// +optional
optional string phase = 1;
// Represents the latest available observations of the project current state.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
repeated k8s.io.api.core.v1.NamespaceCondition conditions = 2;
}

23
vendor/github.com/openshift/api/project/v1/legacy.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
)
func addLegacyKnownTypes(scheme *runtime.Scheme) error {
types := []runtime.Object{
&Project{},
&ProjectList{},
&ProjectRequest{},
}
scheme.AddKnownTypes(legacyGroupVersion, types...)
return nil
}

40
vendor/github.com/openshift/api/project/v1/register.go generated vendored Normal file
View File

@ -0,0 +1,40 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
GroupName = "project.openshift.io"
GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
// Install is a function which adds this version to a scheme
Install = schemeBuilder.AddToScheme
// SchemeGroupVersion generated code relies on this name
// Deprecated
SchemeGroupVersion = GroupVersion
// AddToScheme exists solely to keep the old generators creating valid code
// DEPRECATED
AddToScheme = schemeBuilder.AddToScheme
)
// Resource generated code relies on this being here, but it logically belongs to the group
// DEPRECATED
func Resource(resource string) schema.GroupResource {
return schema.GroupResource{Group: GroupName, Resource: resource}
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(GroupVersion,
&Project{},
&ProjectList{},
&ProjectRequest{},
)
metav1.AddToGroupVersion(scheme, GroupVersion)
return nil
}

93
vendor/github.com/openshift/api/project/v1/types.go generated vendored Normal file
View File

@ -0,0 +1,93 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ProjectList is a list of Project objects.
type ProjectList struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of projects
Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"`
}
const (
// These are internal finalizer values to Origin
FinalizerOrigin corev1.FinalizerName = "openshift.io/origin"
// ProjectNodeSelector is an annotation that holds the node selector;
// the node selector annotation determines which nodes will have pods from this project scheduled to them
ProjectNodeSelector = "openshift.io/node-selector"
// ProjectRequesterAnnotation is the username that requested a given project. Its not guaranteed to be present,
// but it is set by the default project template.
ProjectRequesterAnnotation = "openshift.io/requester"
)
// ProjectSpec describes the attributes on a Project
type ProjectSpec struct {
// Finalizers is an opaque list of values that must be empty to permanently remove object from storage
Finalizers []corev1.FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=k8s.io/api/core/v1.FinalizerName"`
}
// ProjectStatus is information about the current status of a Project
type ProjectStatus struct {
// Phase is the current lifecycle phase of the project
// +optional
Phase corev1.NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=k8s.io/api/core/v1.NamespacePhase"`
// Represents the latest available observations of the project current state.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []corev1.NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members,
// a quota on the resources that the project may consume, and the security controls on the resources in
// the project. Within a project, members may have different roles - project administrators can set
// membership, editors can create and manage the resources, and viewers can see but not access running
// containers. In a normal cluster project administrators are not able to alter their quotas - that is
// restricted to cluster administrators.
//
// Listing or watching projects will return only projects the user has the reader role on.
//
// An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed
// as editable to end users while namespaces are not. Direct creation of a project is typically restricted
// to administrators, while end users should use the requestproject resource.
type Project struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the behavior of the Namespace.
Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status describes the current status of a Namespace
Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +genclient
// +genclient:nonNamespaced
// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch
// +genclient:method=Create,verb=create,result=Project
// ProjecRequest is the set of options necessary to fully qualify a project request
type ProjectRequest struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// DisplayName is the display name to apply to a project
DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"`
// Description is the description to apply to a project
Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"`
}

View File

@ -0,0 +1,141 @@
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
corev1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Project) DeepCopyInto(out *Project) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project.
func (in *Project) DeepCopy() *Project {
if in == nil {
return nil
}
out := new(Project)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Project) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectList) DeepCopyInto(out *ProjectList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Project, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList.
func (in *ProjectList) DeepCopy() *ProjectList {
if in == nil {
return nil
}
out := new(ProjectList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectRequest) DeepCopyInto(out *ProjectRequest) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectRequest.
func (in *ProjectRequest) DeepCopy() *ProjectRequest {
if in == nil {
return nil
}
out := new(ProjectRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
	*out = *in
	if in.Finalizers != nil {
		// Finalizer names are plain values, so a shallow element copy
		// into a fresh slice is a full deep copy.
		finalizers := make([]corev1.FinalizerName, len(in.Finalizers))
		copy(finalizers, in.Finalizers)
		out.Finalizers = finalizers
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec.
func (in *ProjectSpec) DeepCopy() *ProjectSpec {
	if in == nil {
		return nil
	}
	clone := new(ProjectSpec)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) {
	*out = *in
	if in.Conditions != nil {
		// Each condition carries nested pointers/metadata, so copy
		// element-wise via its own DeepCopyInto.
		conditions := make([]corev1.NamespaceCondition, len(in.Conditions))
		for i := range in.Conditions {
			in.Conditions[i].DeepCopyInto(&conditions[i])
		}
		out.Conditions = conditions
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus.
func (in *ProjectStatus) DeepCopy() *ProjectStatus {
	if in == nil {
		return nil
	}
	clone := new(ProjectStatus)
	in.DeepCopyInto(clone)
	return clone
}

View File

@ -0,0 +1,65 @@
package v1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
// map_Project holds the go-restful swagger documentation strings for Project,
// keyed by JSON field name ("" is the type-level description).
var map_Project = map[string]string{
	"":         "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed as editable to end users while namespaces are not. Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.",
	"metadata": "Standard object's metadata.",
	"spec":     "Spec defines the behavior of the Namespace.",
	"status":   "Status describes the current status of a Namespace",
}

// SwaggerDoc returns the swagger documentation map for Project.
func (Project) SwaggerDoc() map[string]string {
	return map_Project
}
// map_ProjectList holds the go-restful swagger documentation strings for
// ProjectList, keyed by JSON field name ("" is the type-level description).
var map_ProjectList = map[string]string{
	"":         "ProjectList is a list of Project objects.",
	"metadata": "Standard object's metadata.",
	"items":    "Items is the list of projects",
}

// SwaggerDoc returns the swagger documentation map for ProjectList.
func (ProjectList) SwaggerDoc() map[string]string {
	return map_ProjectList
}
// map_ProjectRequest holds the go-restful swagger documentation strings for
// ProjectRequest, keyed by JSON field name ("" is the type-level description).
// NOTE(review): "ProjecRequest" below is a typo inherited from the upstream
// generated source; it is a runtime string and is preserved verbatim here.
var map_ProjectRequest = map[string]string{
	"":            "ProjecRequest is the set of options necessary to fully qualify a project request",
	"metadata":    "Standard object's metadata.",
	"displayName": "DisplayName is the display name to apply to a project",
	"description": "Description is the description to apply to a project",
}

// SwaggerDoc returns the swagger documentation map for ProjectRequest.
func (ProjectRequest) SwaggerDoc() map[string]string {
	return map_ProjectRequest
}
// map_ProjectSpec holds the go-restful swagger documentation strings for
// ProjectSpec, keyed by JSON field name ("" is the type-level description).
var map_ProjectSpec = map[string]string{
	"":           "ProjectSpec describes the attributes on a Project",
	"finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage",
}

// SwaggerDoc returns the swagger documentation map for ProjectSpec.
func (ProjectSpec) SwaggerDoc() map[string]string {
	return map_ProjectSpec
}
// map_ProjectStatus holds the go-restful swagger documentation strings for
// ProjectStatus, keyed by JSON field name ("" is the type-level description).
var map_ProjectStatus = map[string]string{
	"":           "ProjectStatus is information about the current status of a Project",
	"phase":      "Phase is the current lifecycle phase of the project",
	"conditions": "Represents the latest available observations of the project current state.",
}

// SwaggerDoc returns the swagger documentation map for ProjectStatus.
func (ProjectStatus) SwaggerDoc() map[string]string {
	return map_ProjectStatus
}
// AUTO-GENERATED FUNCTIONS END HERE

1
vendor/modules.txt vendored
View File

@ -121,6 +121,7 @@ github.com/onsi/gomega/types
github.com/openshift/api/config/v1 github.com/openshift/api/config/v1
github.com/openshift/api/console/v1 github.com/openshift/api/console/v1
github.com/openshift/api/oauth/v1 github.com/openshift/api/oauth/v1
github.com/openshift/api/project/v1
github.com/openshift/api/route/v1 github.com/openshift/api/route/v1
github.com/openshift/api/user/v1 github.com/openshift/api/user/v1
# github.com/operator-framework/api v0.10.0 => github.com/operator-framework/api v0.8.0 # github.com/operator-framework/api v0.10.0 => github.com/operator-framework/api v0.8.0