> -o template --template='{{ .metadata.name }}'
+ protected static String ACTIVEMQ_ARTEMIS_CRD_NAME = "activemqartemises.broker.amq.io";
+
+ protected static String ACTIVEMQ_ARTEMIS_ADDRESS_CRD_NAME = "activemqartemisaddresses.broker.amq.io";
+
/**
- * Get the provisioned application service related Pods
- *
- * Currently blocked by the fact that Pod Status pod names do not reflect the reality
- *
- * Once these issues are resolved, we can use the ready pod names returned by
- * {@code ActiveMQArtemisStatus.getPodStatus()} to create the List with pods maintained by the provisioner.
+ * Generic CRD client which is used by the client builders' default implementation to build the CRD clients
*
- * @return A list of related {@link Pod} instances
+ * @return A {@link NonNamespaceOperation} instance that represents a client for {@link CustomResourceDefinition} resources
*/
- @Override
- public List<Pod> getPods() {
- throw new UnsupportedOperationException("To be implemented!");
+ protected abstract NonNamespaceOperation<CustomResourceDefinition, CustomResourceDefinitionList, Resource<CustomResourceDefinition>> customResourceDefinitionsClient();
+
+ // activemqartemises.broker.amq.io
+ protected abstract HasMetadataOperationsImpl<ActiveMQArtemis, ActiveMQArtemisList> activeMQArtemisCustomResourcesClient(
+ CustomResourceDefinitionContext crdc);
+
+ // activemqartemisaddresses.broker.amq.io
+ protected abstract HasMetadataOperationsImpl<ActiveMQArtemisAddress, ActiveMQArtemisAddressList> activeMQArtemisAddressesCustomResourcesClient(
+ CustomResourceDefinitionContext crdc);
+
+ private static NonNamespaceOperation<ActiveMQArtemis, ActiveMQArtemisList, Resource<ActiveMQArtemis>> ACTIVE_MQ_ARTEMISES_CLIENT;
+
+ /**
+ * Get a client capable of working with {@link ActiveMQOperatorProvisioner#ACTIVEMQ_ARTEMIS_CRD_NAME} custom resource.
+ *
+ * @return client for operations with {@link ActiveMQOperatorProvisioner#ACTIVEMQ_ARTEMIS_CRD_NAME} custom resource
+ */
+ public NonNamespaceOperation<ActiveMQArtemis, ActiveMQArtemisList, Resource<ActiveMQArtemis>> activeMQArtemisesClient() {
+ if (ACTIVE_MQ_ARTEMISES_CLIENT == null) {
+ CustomResourceDefinition crd = customResourceDefinitionsClient()
+ .withName(ACTIVEMQ_ARTEMIS_CRD_NAME).get();
+ if (crd == null) {
+ throw new RuntimeException(String.format("[%s] custom resource is not provided by [%s] operator.",
+ ACTIVEMQ_ARTEMIS_CRD_NAME, OPERATOR_ID));
+ }
+ ACTIVE_MQ_ARTEMISES_CLIENT = activeMQArtemisCustomResourcesClient(CustomResourceDefinitionContext.fromCrd(crd));
+ }
+ return ACTIVE_MQ_ARTEMISES_CLIENT;
}
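+
+ // Illustrative usage only (assumes a concrete subclass wires customResourceDefinitionsClient() and
+ // activeMQArtemisCustomResourcesClient() to an actual cluster client):
+ //
+ //   ActiveMQArtemis broker = activeMQArtemisesClient()
+ //       .withName(getApplication().getActiveMQArtemis().getMetadata().getName())
+ //       .get(); // null if the CR has not been created yet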
- @Override
- protected String getOperatorCatalogSource() {
- return IntersmashConfig.activeMQOperatorCatalogSource();
+ private static NonNamespaceOperation<ActiveMQArtemisAddress, ActiveMQArtemisAddressList, Resource<ActiveMQArtemisAddress>> ACTIVE_MQ_ARTEMIS_ADDRESSES_CLIENT;
+
+ /**
+ * Get a client capable of working with {@link ActiveMQOperatorProvisioner#ACTIVEMQ_ARTEMIS_ADDRESS_CRD_NAME} custom resource.
+ *
+ * @return client for operations with {@link ActiveMQOperatorProvisioner#ACTIVEMQ_ARTEMIS_ADDRESS_CRD_NAME} custom resource
+ */
+ public NonNamespaceOperation<ActiveMQArtemisAddress, ActiveMQArtemisAddressList, Resource<ActiveMQArtemisAddress>> activeMQArtemisAddressesClient() {
+ if (ACTIVE_MQ_ARTEMIS_ADDRESSES_CLIENT == null) {
+ CustomResourceDefinition crd = customResourceDefinitionsClient()
+ .withName(ACTIVEMQ_ARTEMIS_ADDRESS_CRD_NAME).get();
+ if (crd == null) {
+ throw new RuntimeException(String.format("[%s] custom resource is not provided by [%s] operator.",
+ ACTIVEMQ_ARTEMIS_ADDRESS_CRD_NAME, OPERATOR_ID));
+ }
+ ACTIVE_MQ_ARTEMIS_ADDRESSES_CLIENT = activeMQArtemisAddressesCustomResourcesClient(
+ CustomResourceDefinitionContext.fromCrd(crd));
+ }
+ return ACTIVE_MQ_ARTEMIS_ADDRESSES_CLIENT;
}
- @Override
- protected String getOperatorIndexImage() {
- return IntersmashConfig.activeMQOperatorIndexImage();
+ /**
+ * Get a reference to activeMQArtemis object. Use get() to get the actual object, or null in case it does not
+ * exist on tested cluster.
+ * @return A concrete {@link Resource} instance representing the {@link ActiveMQArtemis} resource definition
+ */
+ public Resource<ActiveMQArtemis> activeMQArtemis() {
+ return activeMQArtemisesClient().withName(getApplication().getActiveMQArtemis().getMetadata().getName());
}
- @Override
- protected String getOperatorChannel() {
- return IntersmashConfig.activeMQOperatorChannel();
+ /**
+ * Get a reference to activeMQArtemisAddress object. Use get() to get the actual object, or null in case it does not
+ * exist on tested cluster.
+ *
+ * @param name name of the activeMQArtemisAddress custom resource
+ * @return A concrete {@link Resource} instance representing the {@link ActiveMQArtemisAddress} resource definition
+ */
+ public Resource<ActiveMQArtemisAddress> activeMQArtemisAddress(String name) {
+ return activeMQArtemisAddressesClient().withName(name);
+ }
+
+ /**
+ * Get all activeMQArtemisAddresses maintained by the current operator instance.
+ *
+ * Be aware that this method returns just references to the addresses; they might not actually exist on the cluster.
+ * Use get() to get the actual object, or null in case it does not exist on tested cluster.
+ * @return A list of {@link Resource} instances representing the {@link ActiveMQArtemisAddress} resource definitions
+ */
+ public List<Resource<ActiveMQArtemisAddress>> activeMQArtemisAddresses() {
+ ActiveMQOperatorApplication activeMqOperatorApplication = getApplication();
+ return activeMqOperatorApplication.getActiveMQArtemisAddresses().stream()
+ .map(activeMQArtemisAddress -> activeMQArtemisAddress.getMetadata().getName())
+ .map(this::activeMQArtemisAddress)
+ .collect(Collectors.toList());
}
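+
+ // Illustrative only: the references above can be materialized into the CRs that actually exist on the
+ // cluster (a reference's get() returns null when the CR is not present):
+ //
+ //   List<ActiveMQArtemisAddress> existing = activeMQArtemisAddresses().stream()
+ //       .map(Resource::get)
+ //       .filter(Objects::nonNull)
+ //       .collect(Collectors.toList());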
}
diff --git a/provisioners/src/main/java/org/jboss/intersmash/provision/operator/HyperfoilOperatorProvisioner.java b/provisioners/src/main/java/org/jboss/intersmash/provision/operator/HyperfoilOperatorProvisioner.java
new file mode 100644
index 000000000..e137cf0bc
--- /dev/null
+++ b/provisioners/src/main/java/org/jboss/intersmash/provision/operator/HyperfoilOperatorProvisioner.java
@@ -0,0 +1,240 @@
+package org.jboss.intersmash.provision.operator;
+
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BooleanSupplier;
+
+import org.jboss.intersmash.IntersmashConfig;
+import org.jboss.intersmash.application.operator.HyperfoilOperatorApplication;
+import org.jboss.intersmash.k8s.KubernetesConfig;
+import org.jboss.intersmash.provision.Provisioner;
+import org.jboss.intersmash.application.k8s.HasPods;
+import org.slf4j.event.Level;
+
+import com.google.common.base.Strings;
+
+import cz.xtf.core.http.Https;
+import cz.xtf.core.waiting.SimpleWaiter;
+import cz.xtf.core.waiting.failfast.FailFastCheck;
+import io.fabric8.kubernetes.api.model.DeletionPropagation;
+import io.fabric8.kubernetes.api.model.Pod;
+import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinition;
+import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinitionList;
+import io.fabric8.kubernetes.api.model.networking.v1.Ingress;
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient;
+import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;
+import io.fabric8.kubernetes.client.dsl.Resource;
+import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext;
+import io.fabric8.kubernetes.client.dsl.internal.HasMetadataOperationsImpl;
+import io.hyperfoil.v1alpha2.Hyperfoil;
+import io.hyperfoil.v1alpha2.HyperfoilList;
+import lombok.NonNull;
+
+public abstract class HyperfoilOperatorProvisioner extends
+ OperatorProvisioner implements Provisioner, HasPods {
+
+ public HyperfoilOperatorProvisioner(@NonNull HyperfoilOperatorApplication application) {
+ super(application, HyperfoilOperatorProvisioner.OPERATOR_ID);
+ }
+
+ // =================================================================================================================
+ // Hyperfoil related
+ // =================================================================================================================
+ protected Ingress retrieveNamedIngress(final String ingressName) {
+ return this.client().network().v1().ingresses().withName(ingressName).get();
+ }
+
+ public void undeploy(boolean unsubscribe) {
+ hyperfoil().withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
+ BooleanSupplier bs = () -> getPods().stream()
+ .noneMatch(p -> !Strings.isNullOrEmpty(p.getMetadata().getLabels().get("app"))
+ && p.getMetadata().getLabels().get("app").equals(getApplication().getName()));
+ String reason = "Waiting for exactly 0 pods with label \"app\"=" + getApplication().getName() + " to be ready.";
+ new SimpleWaiter(bs, TimeUnit.MINUTES, 2, reason)
+ .level(Level.DEBUG)
+ .waitFor();
+ if (unsubscribe) {
+ unsubscribe();
+ }
+ }
+
+ /**
+ * This method checks if the Operator's POD is actually running;
+ * it's been tailored on the community-operators Cluster Service Version format, which is missing the
+ * spec.install.spec.deployments.spec.template.metadata.labels."app.kubernetes.io/name" label that is used
+ * by {@link OperatorProvisioner#waitForOperatorPod()} (see
+ * https://github.com/operator-framework/community-operators/tree/master/community-operators/hyperfoil-bundle)
+ */
+ @Override
+ protected void waitForOperatorPod() {
+ String[] operatorSpecs = this.execute("get", "csvs", getCurrentCSV(), "-o", "template", "--template",
+ "{{range .spec.install.spec.deployments}}{{printf \"%d|%s\\n\" .spec.replicas .name}}{{end}}")
+ .split(System.lineSeparator());
+ for (String spec : operatorSpecs) {
+ String[] operatorSpec = spec.split("\\|");
+ if (operatorSpec.length != 2) {
+ throw new RuntimeException("Failed to get operator deployment spec from csvs!");
+ }
+ new SimpleWaiter(() -> getPods().stream().filter(
+ pod -> (pod.getMetadata()
+ .getName()
+ .startsWith(operatorSpec[1])
+ && pod.getStatus().getPhase().equalsIgnoreCase("Running")))
+ .count() == Integer.parseInt(operatorSpec[0]))
+ .failFast(() -> false)
+ .reason("Wait for expected number of replicas to be active.")
+ .level(Level.DEBUG)
+ .waitFor();
+ }
+ }
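+
+ // For reference, the go-template above prints one "replicas|name" pair per operator deployment,
+ // e.g. (illustrative output only):
+ //   1|hyperfoil-operator-controller-manager
+ // which is what the split on "|" above turns into [replica count, deployment name].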
+
+ /**
+ * Tells if a specific container inside the pod is ready
+ *
+ * @param pod the {@link Pod} instance to inspect
+ * @param containerName name of the container
+ * @return true if the named container is reported as ready, false otherwise
+ */
+ private boolean isContainerReady(Pod pod, String containerName) {
+ if (Objects.nonNull(pod)) {
+ return pod.getStatus().getContainerStatuses().stream()
+ .filter(containerStatus -> containerStatus.getName().equalsIgnoreCase(containerName)
+ && containerStatus.getReady())
+ .count() > 0;
+ }
+ return false;
+ }
+
+ // =================================================================================================================
+ // Related to generic provisioning behavior
+ // =================================================================================================================
+ /**
+ * HyperFoil is community only, so we return the Operator namespace concretely here
+ * @return String representing the Operator namespace
+ */
+ @Override
+ protected String getOperatorNamespace() {
+ return "olm";
+ }
+
+ @Override
+ public String getOperatorCatalogSource() {
+ return IntersmashConfig.hyperfoilOperatorCatalogSource();
+ }
+
+ @Override
+ public String getOperatorIndexImage() {
+ return IntersmashConfig.hyperfoilOperatorIndexImage();
+ }
+
+ @Override
+ public String getOperatorChannel() {
+ return IntersmashConfig.hyperfoilOperatorChannel();
+ }
+
+ @Override
+ protected String getCatalogSourceNamespace() {
+ String namespace = super.getCatalogSourceNamespace();
+ if (!org.assertj.core.util.Strings.isNullOrEmpty(getOperatorIndexImage())) {
+ namespace = KubernetesConfig.namespace();
+ }
+ return namespace;
+ }
+
+ @Override
+ public void deploy() {
+ FailFastCheck ffCheck = () -> false;
+ if (!isSubscribed()) {
+ subscribe();
+ }
+ hyperfoilClient().createOrReplace(getApplication().getHyperfoil());
+ new SimpleWaiter(() -> hyperfoil().get().getStatus() != null)
+ .failFast(ffCheck)
+ .reason("Wait for status field to be initialized.")
+ .level(Level.DEBUG)
+ .waitFor();
+ new SimpleWaiter(() -> getPods().size() == 1)
+ .failFast(ffCheck)
+ .reason("Wait for expected number of replicas to be active.")
+ .level(Level.DEBUG)
+ .waitFor();
+ new SimpleWaiter(
+ () -> Https.getCode(getURL().toExternalForm()) != 503)
+ .reason("Wait until the route is ready to serve.")
+ .waitFor();
+ }
+
+ // TODO: check for removal
+ // @Override
+ // default List getPods() {
+ // List pods = new ArrayList<>();
+ // Pod hyperfoilControllerPod = getPod(String.format("%s-controller", getApplication().getName()));
+ // if (isContainerReady(hyperfoilControllerPod, "controller")) {
+ // pods.add(hyperfoilControllerPod);
+ // }
+ // return pods;
+ // }
+
+ @Override
+ public void undeploy() {
+ undeploy(true);
+ }
+
+ @Override
+ public void scale(int replicas, boolean wait) {
+ throw new UnsupportedOperationException("Scaling is not implemented by Hyperfoil operator based provisioning");
+ }
+
+ // =================================================================================================================
+ // Client related
+ // =================================================================================================================
+ // this is the packagemanifest for the hyperfoil operator;
+ // you can get it with command:
+ // oc get packagemanifest hyperfoil-bundle -o template --template='{{ .metadata.name }}'
+ public static String OPERATOR_ID = IntersmashConfig.hyperfoilOperatorPackageManifest();
+
+ // this is the name of the Hyperfoil CustomResourceDefinition
+ // you can get it with command:
+ // oc get crd hyperfoils.hyperfoil.io -o template --template='{{ .metadata.name }}'
+ protected static String HYPERFOIL_CRD_NAME = "hyperfoils.hyperfoil.io";
+
+ /**
+ * Generic CRD client which is used by the client builders' default implementation to build the CRD clients
+ *
+ * @return A {@link NonNamespaceOperation} instance that represents a client for {@link CustomResourceDefinition} resources
+ */
+ protected abstract NonNamespaceOperation<CustomResourceDefinition, CustomResourceDefinitionList, Resource<CustomResourceDefinition>> customResourceDefinitionsClient();
+
+ // hyperfoils.hyperfoil.io
+ protected abstract HasMetadataOperationsImpl<Hyperfoil, HyperfoilList> hyperfoilCustomResourcesClient(
+ CustomResourceDefinitionContext crdc);
+
+ protected static NonNamespaceOperation<Hyperfoil, HyperfoilList, Resource<Hyperfoil>> HYPERFOIL_CUSTOM_RESOURCE_CLIENT;
+
+ /**
+ * Get a client capable of working with {@link HyperfoilOperatorProvisioner#HYPERFOIL_CRD_NAME} custom resource.
+ *
+ * @return client for operations with {@link HyperfoilOperatorProvisioner#HYPERFOIL_CRD_NAME} custom resource
+ */
+ public NonNamespaceOperation<Hyperfoil, HyperfoilList, Resource<Hyperfoil>> hyperfoilClient() {
+ if (HYPERFOIL_CUSTOM_RESOURCE_CLIENT == null) {
+ CustomResourceDefinition crd = customResourceDefinitionsClient()
+ .withName(HYPERFOIL_CRD_NAME).get();
+ if (crd == null) {
+ throw new RuntimeException(String.format("[%s] custom resource is not provided by [%s] operator.",
+ HYPERFOIL_CRD_NAME, OPERATOR_ID));
+ }
+ HYPERFOIL_CUSTOM_RESOURCE_CLIENT = hyperfoilCustomResourcesClient(CustomResourceDefinitionContext.fromCrd(crd));
+ }
+ return HYPERFOIL_CUSTOM_RESOURCE_CLIENT;
+ }
+
+ /**
+ * Get a reference to Hyperfoil object. Use get() to get the actual object, or null in case it does not
+ * exist on tested cluster.
+ *
+ * @return A concrete {@link Resource} instance representing the {@link Hyperfoil} resource definition
+ */
+ public Resource<Hyperfoil> hyperfoil() {
+ return hyperfoilClient().withName(getApplication().getName());
+ }
+}
diff --git a/provisioners/src/main/java/org/jboss/intersmash/provision/openshift/InfinispanOperatorProvisioner.java b/provisioners/src/main/java/org/jboss/intersmash/provision/operator/InfinispanOperatorProvisioner.java
similarity index 50%
rename from provisioners/src/main/java/org/jboss/intersmash/provision/openshift/InfinispanOperatorProvisioner.java
rename to provisioners/src/main/java/org/jboss/intersmash/provision/operator/InfinispanOperatorProvisioner.java
index e905ffcad..e5515edec 100644
--- a/provisioners/src/main/java/org/jboss/intersmash/provision/openshift/InfinispanOperatorProvisioner.java
+++ b/provisioners/src/main/java/org/jboss/intersmash/provision/operator/InfinispanOperatorProvisioner.java
@@ -13,65 +13,115 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.jboss.intersmash.provision.openshift;
+package org.jboss.intersmash.provision.operator;
-import java.net.MalformedURLException;
-import java.net.URL;
import java.util.List;
-import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BooleanSupplier;
import java.util.stream.Collectors;
-import org.assertj.core.util.Lists;
import org.infinispan.v1.Infinispan;
-import org.infinispan.v1.infinispanspec.Expose;
import org.infinispan.v2alpha1.Cache;
import org.jboss.intersmash.IntersmashConfig;
-import org.jboss.intersmash.application.openshift.InfinispanOperatorApplication;
-import org.jboss.intersmash.provision.openshift.operator.OperatorProvisioner;
+import org.jboss.intersmash.application.operator.InfinispanOperatorApplication;
+import org.jboss.intersmash.provision.Provisioner;
import org.jboss.intersmash.provision.openshift.operator.infinispan.cache.CacheList;
import org.jboss.intersmash.provision.openshift.operator.infinispan.infinispan.InfinispanList;
import org.jboss.intersmash.provision.openshift.operator.infinispan.infinispan.spec.InfinispanConditionBuilder;
import org.slf4j.event.Level;
-import cz.xtf.core.config.OpenShiftConfig;
-import cz.xtf.core.event.helpers.EventHelper;
-import cz.xtf.core.openshift.OpenShiftWaiters;
-import cz.xtf.core.openshift.OpenShifts;
import cz.xtf.core.waiting.SimpleWaiter;
+import cz.xtf.core.waiting.failfast.FailFastCheck;
import io.fabric8.kubernetes.api.model.DeletionPropagation;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.Service;
import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinition;
-import io.fabric8.kubernetes.api.model.apps.StatefulSet;
-import io.fabric8.kubernetes.client.dsl.MixedOperation;
+import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinitionList;
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient;
import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;
import io.fabric8.kubernetes.client.dsl.Resource;
import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext;
-import io.fabric8.openshift.api.model.Route;
-import lombok.NonNull;
+import io.fabric8.kubernetes.client.dsl.internal.HasMetadataOperationsImpl;
-public class InfinispanOperatorProvisioner extends OperatorProvisioner {
- private static final String INFINISPAN_RESOURCE = "infinispans.infinispan.org";
- private static NonNamespaceOperation> INFINISPAN_CLIENT;
+/**
+ * Defines the contract and default behavior of an Operator based provisioner for the Infinispan Operator
+ */
+public abstract class InfinispanOperatorProvisioner extends
+ OperatorProvisioner implements Provisioner {
- private static final String INFINISPAN_CACHE_RESOURCE = "caches.infinispan.org";
- private static NonNamespaceOperation> INFINISPAN_CACHES_CLIENT;
+ public InfinispanOperatorProvisioner(InfinispanOperatorApplication application) {
+ super(application, InfinispanOperatorProvisioner.OPERATOR_ID);
+ }
- // oc get packagemanifest datagrid -n openshift-marketplace
- private static final String OPERATOR_ID = IntersmashConfig.infinispanOperatorPackageManifest();
+ // =================================================================================================================
+ // Infinispan related
+ // =================================================================================================================
+ public List<Pod> getInfinispanPods() {
+ return getPods().stream().filter(
+ // the following criteria is implemented based on similar requirements taken from the
+ // infinispan-operator project, see
+ // https://github.com/infinispan/infinispan-operator/blob/main/test/e2e/utils/kubernetes.go#L599-L604
+ p -> p.getMetadata().getLabels().entrySet().stream()
+ .anyMatch(tl -> "app".equals(tl.getKey()) && "infinispan-pod".equals(tl.getValue())
+ && p.getMetadata().getLabels().entrySet().stream().anyMatch(
+ cnl -> "clusterName".equals(cnl.getKey())
+ && getApplication().getName().equals(cnl.getValue()))))
+ .collect(Collectors.toList());
+ }
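+
+ // For reference, the filter above keeps only pods carrying both labels (values are the ones checked above):
+ //   app=infinispan-pod
+ //   clusterName=<name of the InfinispanOperatorApplication>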
- public InfinispanOperatorProvisioner(@NonNull InfinispanOperatorApplication infinispanOperatorApplication) {
- super(infinispanOperatorApplication, OPERATOR_ID);
+ protected Service getService(final String name) {
+ return this.client().services().withName(name).get();
}
- public static String getOperatorId() {
- return OPERATOR_ID;
+ // TODO: check for removal
+ // default List getStatefulSetPods() {
+ // StatefulSet statefulSet = getStatefulSet(getApplication().getName());
+ // return Objects.nonNull(statefulSet)
+ // ? getPods().stream()
+ // .filter(p -> p.getMetadata().getLabels().get("controller-revision-hash") != null
+ // && p.getMetadata().getLabels().get("controller-revision-hash")
+ // .equals(statefulSet.getStatus().getUpdateRevision()))
+ // .collect(Collectors.toList())
+ // : List.of();
+ // }
+
+ private void waitForResourceReadiness() {
+ // it must be well-formed
+ // see https://github.com/kubernetes/apimachinery/blob/v0.20.4/pkg/apis/meta/v1/types.go#L1289
+ new SimpleWaiter(
+ () -> infinispan().get().getStatus().getConditions().stream()
+ .anyMatch(
+ c -> c.getType().equals(InfinispanConditionBuilder.ConditionType.ConditionWellFormed.getValue())
+ && c.getStatus().equals("True")))
+ .reason("Wait for infinispan resource to be ready").level(Level.DEBUG)
+ .waitFor();
+ // and with the expected number of Cache CR(s)
+ if (getApplication().getCaches().size() > 0)
+ new SimpleWaiter(() -> cachesClient().list().getItems().size() == caches().size())
+ .reason("Wait for caches to be ready.").level(Level.DEBUG).waitFor();
+ }
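+
+ // The condition awaited above roughly corresponds to the following fragment of the Infinispan CR status
+ // (illustrative snippet, assuming the ConditionWellFormed type maps to the "wellFormed" string value):
+ //   status:
+ //     conditions:
+ //     - type: wellFormed
+ //       status: "True"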
+
+ // =================================================================================================================
+ // Related to generic provisioning behavior
+ // =================================================================================================================
+ @Override
+ public String getOperatorCatalogSource() {
+ return IntersmashConfig.infinispanOperatorCatalogSource();
+ }
+
+ @Override
+ public String getOperatorIndexImage() {
+ return IntersmashConfig.infinispanOperatorIndexImage();
+ }
+
+ @Override
+ public String getOperatorChannel() {
+ return IntersmashConfig.infinispanOperatorChannel();
}
@Override
public void deploy() {
- ffCheck = FailFastUtils.getFailFastCheck(EventHelper.timeOfLastEventBMOrTestNamespaceOrEpoch(),
- getApplication().getName());
+ FailFastCheck ffCheck = () -> false;
subscribe();
// create Infinispan CR
final int replicas = getApplication().getInfinispan().getSpec().getReplicas();
@@ -89,15 +139,15 @@ public void deploy() {
@Override
public void undeploy() {
+ FailFastCheck ffCheck = () -> false;
// delete Cache CR(s)
caches().forEach(cache -> cache.withPropagationPolicy(DeletionPropagation.FOREGROUND).delete());
// delete Infinispan CR
infinispan().withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
- // wait for 0 pods, and here it waits for no pods to exist with the `clusterName=` label,
- // since all CRs have been deleted
- OpenShiftWaiters.get(OpenShiftProvisioner.openShift, ffCheck)
- .areExactlyNPodsReady(0, "clusterName", getApplication().getInfinispan().getMetadata().getName())
- .level(Level.DEBUG)
+ // wait for 0 pods
+ BooleanSupplier bs = () -> getInfinispanPods().isEmpty();
+ new SimpleWaiter(bs, TimeUnit.MINUTES, 2,
+ "Waiting for 0 pods with label \"clusterName\"=" + getApplication().getInfinispan().getMetadata().getName())
.waitFor();
unsubscribe();
}
@@ -116,112 +166,65 @@ public void scale(int replicas, boolean wait) {
}
}
- @Override
- public List getPods() {
- StatefulSet statefulSet = OpenShiftProvisioner.openShift.getStatefulSet(getApplication().getName());
- return Objects.nonNull(statefulSet)
- ? OpenShiftProvisioner.openShift.getLabeledPods("controller-revision-hash",
- statefulSet.getStatus().getUpdateRevision())
- : Lists.emptyList();
- }
+ // =================================================================================================================
+ // Client related
+ // =================================================================================================================
+ // this is the packagemanifest for the operator;
+ // you can get it with command:
+ // oc get packagemanifest -o template --template='{{ .metadata.name }}'
+ public static String OPERATOR_ID = IntersmashConfig.infinispanOperatorPackageManifest();
- public List getInfinispanPods() {
- return getInfinispanPods(getApplication().getName());
- }
+ // this is the name of the CustomResourceDefinition(s)
+ // you can get it with command:
+ // oc get crd -o template --template='{{ .metadata.name }}'
+ private static String INFINISPAN_CRD_NAME = "infinispans.infinispan.org";
- public static List getInfinispanPods(final String clusterName) {
- return OpenShiftProvisioner.openShift.inNamespace(OpenShiftConfig.namespace()).pods().list().getItems().stream().filter(
- // the following criteria is implemented based on similar requirements taken from the
- // infinispan-operator project, see
- // https://github.com/infinispan/infinispan-operator/blob/main/test/e2e/utils/kubernetes.go#L599-L604
- p -> p.getMetadata().getLabels().entrySet().stream()
- .anyMatch(tl -> "app".equals(tl.getKey()) && "infinispan-pod".equals(tl.getValue())
- && p.getMetadata().getLabels().entrySet().stream().anyMatch(
- cnl -> "clusterName".equals(cnl.getKey()) && clusterName.equals(cnl.getValue()))))
- .collect(Collectors.toList());
- }
+ private static String INFINISPAN_CACHE_CRD_NAME = "caches.infinispan.org";
- @Override
- protected String getOperatorCatalogSource() {
- return IntersmashConfig.infinispanOperatorCatalogSource();
- }
+ /**
+ * Generic CRD client which is used by the client builders' default implementation to build the CRD clients
+ *
+ * @return A {@link NonNamespaceOperation} instance that represents a client for {@link CustomResourceDefinition} resources
+ */
+ protected abstract NonNamespaceOperation<CustomResourceDefinition, CustomResourceDefinitionList, Resource<CustomResourceDefinition>> customResourceDefinitionsClient();
- @Override
- protected String getOperatorIndexImage() {
- return IntersmashConfig.infinispanOperatorIndexImage();
- }
+ // infinispans.infinispan.org
+ protected abstract HasMetadataOperationsImpl<Infinispan, InfinispanList> infinispanCustomResourcesClient(
+ CustomResourceDefinitionContext crdc);
- @Override
- protected String getOperatorChannel() {
- return IntersmashConfig.infinispanOperatorChannel();
- }
+ // caches.infinispan.org
+ protected abstract HasMetadataOperationsImpl<Cache, CacheList> cacheCustomResourcesClient(
+ CustomResourceDefinitionContext crdc);
- /**
- * The result is affected by the CR definition and specifically the method will return the {@code service} URL in
- * case the CR {@code .spec.expose.type} is set to {@code NodePort} or {@code LoadBalancer} while it will return the
- * route URL (i.e. for external access) when {@code .spec.expose.type} is set to {@code Route}
- * @return The URL for the provisioned Infinispan service
- */
- @Override
- public URL getURL() {
- final Service defaultInternalService = OpenShiftProvisioner.openShift.getService(getApplication().getName());
- String internalUrl = "http://" + defaultInternalService.getSpec().getClusterIP() + ":11222";
- String externalUrl = null;
- if (getApplication().getInfinispan().getSpec().getExpose() != null) {
- final Expose.Type exposedType = getApplication().getInfinispan().getSpec().getExpose().getType();
- switch (exposedType) {
- case NodePort:
- // see see https://github.com/infinispan/infinispan-operator/blob/2.0.x/pkg/apis/infinispan/v1/infinispan_types.go#L107
- externalUrl = "http://"
- + OpenShiftProvisioner.openShift.getService(getApplication().getName() + "-external").getSpec()
- .getClusterIP()
- + getApplication().getInfinispan().getSpec().getExpose().getNodePort();
- break;
- case LoadBalancer:
- // see https://github.com/infinispan/infinispan-operator/blob/2.0.x/pkg/apis/infinispan/v1/infinispan_types.go#L111
- externalUrl = "http://"
- + OpenShiftProvisioner.openShift.getService(getApplication().getName() + "-external").getSpec()
- .getExternalIPs().get(0)
- + getApplication().getInfinispan().getSpec().getExpose().getNodePort();
- break;
- case Route:
- // https://github.com/infinispan/infinispan-operator/blob/2.0.x/pkg/apis/infinispan/v1/infinispan_types.go#L116
- Route route = OpenShiftProvisioner.openShift.getRoute(getApplication().getName() + "-external");
- externalUrl = "https://" + route.getSpec().getHost();
- break;
- default:
- throw new UnsupportedOperationException(String.format("Unsupported .spec.expose.type: %s", exposedType));
- }
- }
- try {
- return new URL(externalUrl == null ? internalUrl : externalUrl);
- } catch (MalformedURLException e) {
- throw new RuntimeException(String.format("Infinispan operator Internal URL \"%s\" is malformed.", internalUrl), e);
- }
- }
+ private static NonNamespaceOperation<Infinispan, InfinispanList, Resource<Infinispan>> INFINISPAN_CLIENT;
+ private static NonNamespaceOperation<Cache, CacheList, Resource<Cache>> INFINISPAN_CACHES_CLIENT;
- /**
- * Get a client capable of working with {@link #INFINISPAN_RESOURCE} custom resource.
- *
- * @return client for operations with {@link #INFINISPAN_RESOURCE} custom resource
- */
public NonNamespaceOperation<Infinispan, InfinispanList, Resource<Infinispan>> infinispansClient() {
if (INFINISPAN_CLIENT == null) {
- CustomResourceDefinition crd = OpenShifts.admin().apiextensions().v1().customResourceDefinitions()
- .withName(INFINISPAN_RESOURCE).get();
- CustomResourceDefinitionContext crdc = CustomResourceDefinitionContext.fromCrd(crd);
- if (!getCustomResourceDefinitions().contains(INFINISPAN_RESOURCE)) {
+ CustomResourceDefinition crd = customResourceDefinitionsClient()
+ .withName(INFINISPAN_CRD_NAME).get();
+ if (crd == null) {
throw new RuntimeException(String.format("[%s] custom resource is not provided by [%s] operator.",
- INFINISPAN_RESOURCE, OPERATOR_ID));
+ INFINISPAN_CRD_NAME, OPERATOR_ID));
}
- MixedOperation> infinispansClient = OpenShifts
- .master()
- .newHasMetadataOperation(crdc, Infinispan.class, InfinispanList.class);
- INFINISPAN_CLIENT = infinispansClient.inNamespace(OpenShiftConfig.namespace());
+ INFINISPAN_CLIENT = infinispanCustomResourcesClient(CustomResourceDefinitionContext.fromCrd(crd));
}
return INFINISPAN_CLIENT;
}
+ public NonNamespaceOperation<Cache, CacheList, Resource<Cache>> cachesClient() {
+ if (INFINISPAN_CACHES_CLIENT == null) {
+ CustomResourceDefinition crd = customResourceDefinitionsClient()
+ .withName(INFINISPAN_CACHE_CRD_NAME).get();
+ if (crd == null) {
+ throw new RuntimeException(String.format("[%s] custom resource is not provided by [%s] operator.",
+ INFINISPAN_CACHE_CRD_NAME, OPERATOR_ID));
+ }
+ INFINISPAN_CACHES_CLIENT = cacheCustomResourcesClient(CustomResourceDefinitionContext.fromCrd(crd));
+ }
+ return INFINISPAN_CACHES_CLIENT;
+ }
+
/**
* Get a reference to infinispan object. Use get() to get the actual object, or null in case it does not
* exist on tested cluster.
@@ -231,30 +234,6 @@ public Resource infinispan() {
return infinispansClient().withName(getApplication().getInfinispan().getMetadata().getName());
}
- // caches.infinispan.org
-
- /**
- * Get a client capable of working with {@link #INFINISPAN_CACHE_RESOURCE} custom resource.
- *
- * @return client for operations with {@link #INFINISPAN_CACHE_RESOURCE} custom resource
- */
- public NonNamespaceOperation> cachesClient() {
- if (INFINISPAN_CACHES_CLIENT == null) {
- CustomResourceDefinition crd = OpenShifts.admin().apiextensions().v1().customResourceDefinitions()
- .withName(INFINISPAN_CACHE_RESOURCE).get();
- CustomResourceDefinitionContext crdc = CustomResourceDefinitionContext.fromCrd(crd);
- if (!getCustomResourceDefinitions().contains(INFINISPAN_CACHE_RESOURCE)) {
- throw new RuntimeException(String.format("[%s] custom resource is not provided by [%s] operator.",
- INFINISPAN_CACHE_RESOURCE, OPERATOR_ID));
- }
- MixedOperation> cachesClient = OpenShifts
- .master()
- .newHasMetadataOperation(crdc, Cache.class, CacheList.class);
- INFINISPAN_CACHES_CLIENT = cachesClient.inNamespace(OpenShiftConfig.namespace());
- }
- return INFINISPAN_CACHES_CLIENT;
- }
-
/**
* Get a reference to cache object. Use get() to get the actual object, or null in case it does not
* exist on tested cluster.
@@ -280,20 +259,4 @@ public List> caches() {
.map(this::cache)
.collect(Collectors.toList());
}
-
- private void waitForResourceReadiness() {
- // it must be well-formed
- // see https://github.com/kubernetes/apimachinery/blob/v0.20.4/pkg/apis/meta/v1/types.go#L1289
- new SimpleWaiter(
- () -> infinispan().get().getStatus().getConditions().stream()
- .anyMatch(
- c -> c.getType().equals(InfinispanConditionBuilder.ConditionType.ConditionWellFormed.getValue())
- && c.getStatus().equals("True")))
- .reason("Wait for infinispan resource to be ready").level(Level.DEBUG)
- .waitFor();
- // and with the expected number of Cache CR(s)
- if (getApplication().getCaches().size() > 0)
- new SimpleWaiter(() -> cachesClient().list().getItems().size() == caches().size())
- .reason("Wait for caches to be ready.").level(Level.DEBUG).waitFor(); // no isReady() for cache
- }
}
diff --git a/provisioners/src/main/java/org/jboss/intersmash/provision/openshift/KafkaOperatorProvisioner.java b/provisioners/src/main/java/org/jboss/intersmash/provision/operator/KafkaOperatorProvisioner.java
similarity index 73%
rename from provisioners/src/main/java/org/jboss/intersmash/provision/openshift/KafkaOperatorProvisioner.java
rename to provisioners/src/main/java/org/jboss/intersmash/provision/operator/KafkaOperatorProvisioner.java
index a312c68c1..10df4fe20 100644
--- a/provisioners/src/main/java/org/jboss/intersmash/provision/openshift/KafkaOperatorProvisioner.java
+++ b/provisioners/src/main/java/org/jboss/intersmash/provision/operator/KafkaOperatorProvisioner.java
@@ -13,21 +13,25 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.jboss.intersmash.provision.openshift;
+package org.jboss.intersmash.provision.operator;
import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BooleanSupplier;
import org.jboss.intersmash.IntersmashConfig;
-import org.jboss.intersmash.application.openshift.KafkaOperatorApplication;
-import org.jboss.intersmash.provision.openshift.operator.OperatorProvisioner;
+import org.jboss.intersmash.application.operator.KafkaOperatorApplication;
+import org.jboss.intersmash.provision.Provisioner;
import org.slf4j.event.Level;
-import cz.xtf.core.config.OpenShiftConfig;
-import cz.xtf.core.event.helpers.EventHelper;
-import cz.xtf.core.openshift.OpenShiftWaiters;
import cz.xtf.core.waiting.SimpleWaiter;
+import cz.xtf.core.waiting.failfast.FailFastCheck;
import io.fabric8.kubernetes.api.model.DeletionPropagation;
import io.fabric8.kubernetes.api.model.Pod;
+import io.fabric8.kubernetes.api.model.StatusDetails;
+import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinition;
+import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinitionList;
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient;
import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;
import io.fabric8.kubernetes.client.dsl.Resource;
import io.strimzi.api.kafka.Crds;
@@ -45,87 +49,62 @@
* class.
*/
@Slf4j
-public class KafkaOperatorProvisioner extends OperatorProvisioner {
+public abstract class KafkaOperatorProvisioner extends
+ OperatorProvisioner implements Provisioner {
- private static final String OPERATOR_ID = IntersmashConfig.kafkaOperatorPackageManifest();
-
- public KafkaOperatorProvisioner(@NonNull KafkaOperatorApplication kafkaOperatorApplication) {
- super(kafkaOperatorApplication, OPERATOR_ID);
+ public KafkaOperatorProvisioner(@NonNull KafkaOperatorApplication application) {
+ super(application, KafkaOperatorProvisioner.OPERATOR_ID);
}
- /**
- * Get a client capable of working with {@link Kafka} custom resource on our OpenShift instance.
- *
- * @return client for operations with {@link Kafka} custom resource on our OpenShift instance
- */
- public NonNamespaceOperation> kafkasClient() {
- return Crds.kafkaOperation(OpenShiftProvisioner.openShift).inNamespace(OpenShiftConfig.namespace());
+ // =================================================================================================================
+ // Kafka related
+ // =================================================================================================================
+ public List<Pod> getClusterOperatorPods() {
+ return this.client().pods().inNamespace(this.client().getNamespace())
+ .withLabel("strimzi.io/kind", "cluster-operator").list().getItems();
}
/**
- * Kafka cluster resource on OpenShift instance. The Kafka resource returned is the one that is tied with the
- * appropriate Application for which this provisioner is created for. The instance is determined based on the name
- * value defined in specifications.
- *
- * @return returns Kafka cluster resource on OpenShift instance that is tied with our relevant Application only
+ * Get the list of all Kafka pods on the OpenShift instance that belong to this Kafka cluster.
+ *
+ * Note: the Operator actually also creates entity operator pods and a cluster operator pod,
+ * but we list only Kafka related pods here.
+ * @return list of Kafka pods
*/
- public Resource kafka() {
- return kafkasClient().withName(getApplication().getKafka().getMetadata().getName());
- }
+ public List<Pod> getKafkaPods() {
+ List<Pod> kafkaPods = this.client().pods().inNamespace(this.client().getNamespace())
+ .withLabel("app.kubernetes.io/name", "kafka").list().getItems();
+ // Let's filter out just those who match particular naming
+ for (Pod kafkaPod : kafkaPods) {
+ if (!kafkaPod.getMetadata().getName().contains(getApplication().getName() + "-kafka-")) {
+ kafkaPods.remove(kafkaPod);
+ }
+ }
- /**
- * Get a client capable of working with {@link KafkaUser} custom resource on our OpenShift instance.
- *
- * @return client for operations with {@link KafkaUser} custom resource on our OpenShift instance
- */
- public NonNamespaceOperation> kafkasUserClient() {
- return Crds.kafkaUserOperation(OpenShiftProvisioner.openShift).inNamespace(OpenShiftConfig.namespace());
+ return kafkaPods;
}
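+
+ // Example of the naming-based filter above (illustrative, for an application named "my-kafka"):
+ //   "my-kafka-kafka-0" is kept, while a pod whose name does not contain "my-kafka-kafka-" is dropped.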
/**
- * Get a client capable of working with {@link KafkaTopic} custom resource on our OpenShift instance.
- *
- * @return client for operations with {@link KafkaTopic} custom resource on our OpenShift instance
+ * Get the list of all Zookeeper pods on the OpenShift instance that belong to this Kafka cluster.
+ *
+ * Note: the Operator actually also creates Kafka pods, entity operator pods and a cluster operator pod,
+ * but we list only Zookeeper related pods here.
+ * @return list of Zookeeper pods
*/
- public NonNamespaceOperation> kafkasTopicClient() {
- return Crds.topicOperation(OpenShiftProvisioner.openShift).inNamespace(OpenShiftConfig.namespace());
- }
-
- @Override
- public void deploy() {
- ffCheck = FailFastUtils.getFailFastCheck(EventHelper.timeOfLastEventBMOrTestNamespaceOrEpoch(),
- getApplication().getName());
-
- subscribe();
-
- if (getApplication().getKafka() != null) {
- // Create a Kafka cluster instance
- kafkasClient().createOrReplace(getApplication().getKafka());
- waitForKafkaClusterCreation();
- }
-
- if (getApplication().getTopics() != null) {
- for (KafkaTopic topic : getApplication().getTopics()) {
- // Create a Kafka topic instance
- kafkasTopicClient().createOrReplace(topic);
-
- // Wait for it to be created and ready...
- waitForKafkaTopicCreation(topic);
- }
- }
-
- if (getApplication().getUsers() != null) {
- for (KafkaUser user : getApplication().getUsers()) {
- // Create a Kafka user instance
- kafkasUserClient().createOrReplace(user);
-
- // Wait for it to be created and ready...
- waitForKafkaUserCreation(user);
+ public List<Pod> getZookeeperPods() {
+ List<Pod> kafkaPods = this.client().pods().inNamespace(this.client().getNamespace())
+ .withLabel("app.kubernetes.io/name", "zookeeper").list().getItems();
+ // Let's filter out just those who match particular naming
+ for (Pod kafkaPod : kafkaPods) {
+ if (!kafkaPod.getMetadata().getName().contains(getApplication().getName() + "-zookeeper-")) {
+ kafkaPods.remove(kafkaPod);
}
}
+ return kafkaPods;
}
- private void waitForKafkaClusterCreation() {
+ public void waitForKafkaClusterCreation() {
+ FailFastCheck ffCheck = () -> false;
int expectedReplicas = getApplication().getKafka().getSpec().getKafka().getReplicas();
new SimpleWaiter(() -> kafka().get() != null)
.failFast(ffCheck)
@@ -247,101 +226,96 @@ private void waitForKafkaUserCreation(KafkaUser user) {
"Waiting for user '" + userName + "' condition to be 'Ready'").level(Level.DEBUG).waitFor();
}
+ // =================================================================================================================
+ // Related to generic provisioning behavior
+ // =================================================================================================================
@Override
- public void undeploy() {
- // delete the resources
-
- if (getApplication().getUsers() != null) {
- if (kafkasUserClient().delete().isEmpty()) {
- log.warn("Wasn't able to remove all relevant 'Kafka User' resources created for '" + getApplication().getName()
- + "' instance!");
- }
-
- new SimpleWaiter(() -> kafkasUserClient().list().getItems().isEmpty()).level(Level.DEBUG).waitFor();
- }
-
- if (getApplication().getTopics() != null) {
- if (kafkasTopicClient().delete().isEmpty()) {
- log.warn("Wasn't able to remove all relevant 'Kafka Topic' resources created for '" + getApplication().getName()
- + "' instance!");
- }
-
- new SimpleWaiter(() -> kafkasTopicClient().list().getItems().isEmpty()).level(Level.DEBUG).waitFor();
- }
-
- if (getApplication().getKafka() != null) {
- if (kafka().withPropagationPolicy(DeletionPropagation.FOREGROUND).delete().isEmpty()) {
- log.warn("Wasn't able to remove all relevant 'Kafka' resources created for '" + getApplication().getName()
- + "' instance!");
- }
-
- new SimpleWaiter(() -> getKafkaPods().size() == 0).level(Level.DEBUG).waitFor();
- }
-
- unsubscribe();
-
- OpenShiftWaiters.get(OpenShiftProvisioner.openShift, ffCheck)
- .areExactlyNPodsReady(0, "name", getApplication().getName() + "-cluster-operator")
- .level(Level.DEBUG).waitFor();
- }
-
- public static String getOperatorId() {
- return OPERATOR_ID;
+ protected String getOperatorCatalogSource() {
+ return IntersmashConfig.kafkaOperatorCatalogSource();
}
- public KafkaUserList getUsers() {
- return kafkasUserClient().list();
+ @Override
+ protected String getOperatorIndexImage() {
+ return IntersmashConfig.kafkaOperatorIndexImage();
}
- public KafkaTopicList getTopics() {
- return kafkasTopicClient().list();
+ @Override
+ protected String getOperatorChannel() {
+ return IntersmashConfig.kafkaOperatorChannel();
}
@Override
public List<Pod> getPods() {
- return OpenShiftProvisioner.openShift.getLabeledPods("strimzi.io/cluster", getApplication().getName());
+ return this.client().pods().inNamespace(this.client().getNamespace())
+ .withLabel("strimzi.io/cluster", getApplication().getName()).list().getItems();
}
- public List getClusterOperatorPods() {
- return OpenShiftProvisioner.openShift.getLabeledPods("strimzi.io/kind", "cluster-operator");
- }
+ @Override
+ public void deploy() {
+ subscribe();
+ if (getApplication().getKafka() != null) {
+ // Create a Kafka cluster instance
+ kafkasClient().createOrReplace(getApplication().getKafka());
+ waitForKafkaClusterCreation();
+ }
+ if (getApplication().getTopics() != null) {
+ for (KafkaTopic topic : getApplication().getTopics()) {
+ // Create a Kafka topic instance
+ kafkasTopicClient().createOrReplace(topic);
- /**
- * Get list of all Kafka pods on OpenShift instance with regards this Kafka cluster.
- *
- * Note: Operator actually creates also pods for Kafka, instance entity operator pods and cluster operator pod.
- * But we list only Kafka related pods here.
- * @return list of Kafka pods
- */
- public List getKafkaPods() {
- List kafkaPods = OpenShiftProvisioner.openShift.getLabeledPods("app.kubernetes.io/name", "kafka");
- // Let's filter out just those who match particular naming
- for (Pod kafkaPod : kafkaPods) {
- if (!kafkaPod.getMetadata().getName().contains(getApplication().getName() + "-kafka-")) {
- kafkaPods.remove(kafkaPod);
+ // Wait for it to be created and ready...
+ waitForKafkaTopicCreation(topic);
}
}
+ if (getApplication().getUsers() != null) {
+ for (KafkaUser user : getApplication().getUsers()) {
+ // Create a Kafka user instance
+ kafkasUserClient().createOrReplace(user);
- return kafkaPods;
+ // Wait for it to be created and ready...
+ waitForKafkaUserCreation(user);
+ }
+ }
}
- /**
- * Get list of all Zookeeper pods on OpenShift instance with regards this Kafka cluster.
- *
- * Note: Operator actually creates also pods for Kafka, instance entity operator pods and cluster operator pod.
- * But we list only Zookeeper related pods here.
- * @return list of Kafka pods
- */
- public List getZookeeperPods() {
- List kafkaPods = OpenShiftProvisioner.openShift.getLabeledPods("app.kubernetes.io/name", "zookeeper");
- // Let's filter out just those who match particular naming
- for (Pod kafkaPod : kafkaPods) {
- if (!kafkaPod.getMetadata().getName().contains(getApplication().getName() + "-zookeeper-")) {
- kafkaPods.remove(kafkaPod);
+ @Override
+ public void undeploy() {
+ // delete the resources
+ List<StatusDetails> deletionDetails;
+ boolean deleted;
+ if (getApplication().getUsers() != null) {
+ deletionDetails = kafkasUserClient().delete();
+ deleted = deletionDetails.stream().allMatch(d -> d.getCauses().isEmpty());
+ if (!deleted) {
+ log.warn("Wasn't able to remove all relevant 'Kafka User' resources created for '{}' instance!",
+ getApplication().getName());
}
+ new SimpleWaiter(() -> kafkasUserClient().list().getItems().isEmpty()).level(Level.DEBUG).waitFor();
}
-
- return kafkaPods;
+ if (getApplication().getTopics() != null) {
+ deletionDetails = kafkasTopicClient().delete();
+ deleted = deletionDetails.stream().allMatch(d -> d.getCauses().isEmpty());
+ if (!deleted) {
+ log.warn("Wasn't able to remove all relevant 'Kafka Topic' resources created for '{}' instance!",
+ getApplication().getName());
+ }
+ new SimpleWaiter(() -> kafkasTopicClient().list().getItems().isEmpty()).level(Level.DEBUG).waitFor();
+ }
+ if (getApplication().getKafka() != null) {
+ deletionDetails = kafka().withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
+ deleted = deletionDetails.stream().allMatch(d -> d.getCauses().isEmpty());
+ if (!deleted) {
+ log.warn("Wasn't able to remove all relevant 'Kafka' resources created for '{}' instance!",
+ getApplication().getName());
+ }
+ new SimpleWaiter(() -> getKafkaPods().isEmpty()).level(Level.DEBUG).waitFor();
+ }
+ unsubscribe();
+ BooleanSupplier bs = () -> getPods().stream().noneMatch(p -> p.getMetadata().getLabels().get("name") != null
+ && p.getMetadata().getLabels().get("name").equals(getApplication().getName() + "-cluster-operator"));
+ new SimpleWaiter(bs, TimeUnit.MINUTES, 2,
+ "Waiting for 0 pods with label \"name\"=" + getApplication().getName() + "-cluster-operator")
+ .waitFor();
}
@Override
@@ -357,18 +331,64 @@ public void scale(int replicas, boolean wait) {
}
}
- @Override
- protected String getOperatorCatalogSource() {
- return IntersmashConfig.kafkaOperatorCatalogSource();
+ // =================================================================================================================
+ // Client related
+ // =================================================================================================================
+ // this is the packagemanifest for the Kafka (Strimzi) operator;
+ // you can get it with command:
+ // oc get packagemanifest -o template --template='{{ .metadata.name }}'
+ public static String OPERATOR_ID = IntersmashConfig.kafkaOperatorPackageManifest();
+
+ /**
+ * Generic CRD client which is used by the client builders' default implementation to build the CRD clients
+ *
+ * @return A {@link NonNamespaceOperation} instance that represents a client for {@link CustomResourceDefinition} resources
+ */
+ protected abstract NonNamespaceOperation<CustomResourceDefinition, CustomResourceDefinitionList, Resource<CustomResourceDefinition>> customResourceDefinitionsClient();
+
+ /**
+ * Get a client capable of working with {@link Kafka} custom resource on our OpenShift instance.
+ *
+ * @return client for operations with {@link Kafka} custom resource on our OpenShift instance
+ */
+ public NonNamespaceOperation<Kafka, KafkaList, Resource<Kafka>> kafkasClient() {
+ return Crds.kafkaOperation(this.client()).inNamespace(this.client().getNamespace());
}
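+
+ // Illustrative usage (Strimzi's Crds helper builds the typed client on top of this.client()):
+ //   Kafka cluster = kafkasClient().withName(getApplication().getKafka().getMetadata().getName()).get();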
- @Override
- protected String getOperatorIndexImage() {
- return IntersmashConfig.kafkaOperatorIndexImage();
+ /**
+ * Get a client capable of working with {@link KafkaUser} custom resource on our OpenShift instance.
+ *
+ * @return client for operations with {@link KafkaUser} custom resource on our OpenShift instance
+ */
+ public NonNamespaceOperation<KafkaUser, KafkaUserList, Resource<KafkaUser>> kafkasUserClient() {
+ return Crds.kafkaUserOperation(this.client()).inNamespace(this.client().getNamespace());
}
- @Override
- protected String getOperatorChannel() {
- return IntersmashConfig.kafkaOperatorChannel();
+ /**
+ * Get a client capable of working with {@link KafkaTopic} custom resource on our OpenShift instance.
+ *
+ * @return client for operations with {@link KafkaTopic} custom resource on our OpenShift instance
+ */
+ public NonNamespaceOperation<KafkaTopic, KafkaTopicList, Resource<KafkaTopic>> kafkasTopicClient() {
+ return Crds.topicOperation(this.client()).inNamespace(this.client().getNamespace());
+ }
+
+ /**
+ * Kafka cluster resource on OpenShift instance. The Kafka resource returned is the one that is tied to the
+ * appropriate Application for which this provisioner is created. The instance is determined based on the name
+ * value defined in the specifications.
+ *
+ * @return Kafka cluster resource on the OpenShift instance that is tied to our relevant Application only
+ */
+ public Resource<Kafka> kafka() {
+ return kafkasClient().withName(getApplication().getKafka().getMetadata().getName());
+ }
+
+ public KafkaUserList getUsers() {
+ return kafkasUserClient().list();
+ }
+
+ public KafkaTopicList getTopics() {
+ return kafkasTopicClient().list();
}
}
diff --git a/provisioners/src/main/java/org/jboss/intersmash/provision/openshift/KeycloakOperatorProvisioner.java b/provisioners/src/main/java/org/jboss/intersmash/provision/operator/KeycloakOperatorProvisioner.java
similarity index 60%
rename from provisioners/src/main/java/org/jboss/intersmash/provision/openshift/KeycloakOperatorProvisioner.java
rename to provisioners/src/main/java/org/jboss/intersmash/provision/operator/KeycloakOperatorProvisioner.java
index 36c8d79ff..e3554671b 100644
--- a/provisioners/src/main/java/org/jboss/intersmash/provision/openshift/KeycloakOperatorProvisioner.java
+++ b/provisioners/src/main/java/org/jboss/intersmash/provision/operator/KeycloakOperatorProvisioner.java
@@ -13,20 +13,21 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.jboss.intersmash.provision.openshift;
+package org.jboss.intersmash.provision.operator;
-import java.net.MalformedURLException;
import java.net.URL;
import java.text.MessageFormat;
import java.util.List;
import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BooleanSupplier;
import java.util.stream.Collectors;
import org.assertj.core.util.Lists;
import org.assertj.core.util.Strings;
import org.jboss.intersmash.IntersmashConfig;
-import org.jboss.intersmash.application.openshift.KeycloakOperatorApplication;
-import org.jboss.intersmash.provision.openshift.operator.OperatorProvisioner;
+import org.jboss.intersmash.application.operator.KeycloakOperatorApplication;
+import org.jboss.intersmash.provision.Provisioner;
import org.jboss.intersmash.util.tls.CertificatesUtils;
import org.keycloak.k8s.v2alpha1.Keycloak;
import org.keycloak.k8s.v2alpha1.KeycloakOperatorKeycloakList;
@@ -35,94 +36,51 @@
import org.keycloak.k8s.v2alpha1.keycloakspec.Http;
import org.slf4j.event.Level;
-import cz.xtf.core.config.OpenShiftConfig;
-import cz.xtf.core.event.helpers.EventHelper;
-import cz.xtf.core.openshift.OpenShiftWaiters;
-import cz.xtf.core.openshift.OpenShifts;
+import cz.xtf.core.http.Https;
import cz.xtf.core.waiting.SimpleWaiter;
import cz.xtf.core.waiting.failfast.FailFastCheck;
import io.fabric8.kubernetes.api.model.DeletionPropagation;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinition;
+import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinitionList;
import io.fabric8.kubernetes.api.model.apps.StatefulSet;
-import io.fabric8.kubernetes.client.dsl.MixedOperation;
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient;
import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;
import io.fabric8.kubernetes.client.dsl.Resource;
import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext;
-import lombok.NonNull;
+import io.fabric8.kubernetes.client.dsl.internal.HasMetadataOperationsImpl;
/**
* Keycloak operator provisioner
*/
-public class KeycloakOperatorProvisioner extends OperatorProvisioner {
- private static final String KEYCLOAK_RESOURCE = "keycloaks.k8s.keycloak.org";
- private static final String KEYCLOAK_REALM_IMPORT_RESOURCE = "keycloakrealmimports.k8s.keycloak.org";
- private static NonNamespaceOperation> KEYCLOAK_CUSTOM_RESOURCE_CLIENT;
- private static NonNamespaceOperation> KEYCLOAK_REALM_IMPORT_CUSTOM_RESOURCE_CLIENT;
+public abstract class KeycloakOperatorProvisioner extends
+ OperatorProvisioner implements Provisioner {
- public NonNamespaceOperation> keycloakClient() {
- if (KEYCLOAK_CUSTOM_RESOURCE_CLIENT == null) {
- CustomResourceDefinition crd = OpenShifts.admin().apiextensions().v1().customResourceDefinitions()
- .withName(KEYCLOAK_RESOURCE).get();
- CustomResourceDefinitionContext crdc = CustomResourceDefinitionContext.fromCrd(crd);
- if (!getCustomResourceDefinitions().contains(KEYCLOAK_RESOURCE)) {
- throw new RuntimeException(String.format("[%s] custom resource is not provided by [%s] operator.",
- KEYCLOAK_RESOURCE, OPERATOR_ID));
- }
- MixedOperation> crClient = OpenShifts
- .master().newHasMetadataOperation(crdc, Keycloak.class, KeycloakOperatorKeycloakList.class);
- KEYCLOAK_CUSTOM_RESOURCE_CLIENT = crClient.inNamespace(OpenShiftConfig.namespace());
- }
- return KEYCLOAK_CUSTOM_RESOURCE_CLIENT;
- }
-
- public NonNamespaceOperation> keycloakRealmImportClient() {
- if (KEYCLOAK_REALM_IMPORT_CUSTOM_RESOURCE_CLIENT == null) {
- CustomResourceDefinition crd = OpenShifts.admin().apiextensions().v1().customResourceDefinitions()
- .withName(KEYCLOAK_REALM_IMPORT_RESOURCE).get();
- CustomResourceDefinitionContext crdc = CustomResourceDefinitionContext.fromCrd(crd);
- if (!getCustomResourceDefinitions().contains(KEYCLOAK_REALM_IMPORT_RESOURCE)) {
- throw new RuntimeException(String.format("[%s] custom resource is not provided by [%s] operator.",
- KEYCLOAK_REALM_IMPORT_RESOURCE, OPERATOR_ID));
- }
- MixedOperation> crClient = OpenShifts
- .master()
- .newHasMetadataOperation(crdc, KeycloakRealmImport.class, KeycloakOperatorRealmImportList.class);
- KEYCLOAK_REALM_IMPORT_CUSTOM_RESOURCE_CLIENT = crClient.inNamespace(OpenShiftConfig.namespace());
- }
- return KEYCLOAK_REALM_IMPORT_CUSTOM_RESOURCE_CLIENT;
- }
-
- private static final String OPERATOR_ID = IntersmashConfig.keycloakOperatorPackageManifest();
- protected FailFastCheck ffCheck = () -> false;
-
- public KeycloakOperatorProvisioner(@NonNull KeycloakOperatorApplication application) {
- super(application, OPERATOR_ID);
- }
-
- public static String getOperatorId() {
- return OPERATOR_ID;
+ public KeycloakOperatorProvisioner(KeycloakOperatorApplication application) {
+ super(application, KeycloakOperatorProvisioner.OPERATOR_ID);
}
+ // =================================================================================================================
+ // Related to generic provisioning behavior
+ // =================================================================================================================
@Override
- protected String getOperatorCatalogSource() {
+ public String getOperatorCatalogSource() {
return IntersmashConfig.keycloakOperatorCatalogSource();
}
@Override
- protected String getOperatorIndexImage() {
+ public String getOperatorIndexImage() {
return IntersmashConfig.keycloakOperatorIndexImage();
}
@Override
- protected String getOperatorChannel() {
+ public String getOperatorChannel() {
return IntersmashConfig.keycloakOperatorChannel();
}
@Override
public void deploy() {
- ffCheck = FailFastUtils.getFailFastCheck(EventHelper.timeOfLastEventBMOrTestNamespaceOrEpoch(),
- getApplication().getName());
+ FailFastCheck ffCheck = () -> false;
// Keycloak Operator codebase contains the name of the Keycloak image to deploy: user can override Keycloak image to
// deploy using environment variables in Keycloak Operator Subscription
subscribe();
@@ -148,7 +106,7 @@ public void deploy() {
CertificatesUtils.CertificateAndKey certificateAndKey = CertificatesUtils
.generateSelfSignedCertificateAndKey(
getApplication().getKeycloak().getSpec().getHostname().getHostname().replaceFirst("[.].*$", ""),
- tlsSecretName);
+ tlsSecretName, this.client(), this.client().getNamespace());
// add config to keycloak
if (getApplication().getKeycloak().getSpec().getHttp() == null) {
Http http = new Http();
@@ -163,17 +121,15 @@ public void deploy() {
// 1. check externalDatabase exists
if (getApplication().getKeycloak().getSpec().getDb() != null) {
// 2. Service "spec.db.host" must be installed beforehand
- new SimpleWaiter(() -> OpenShiftProvisioner.openShift
- .getService(getApplication().getKeycloak().getSpec().getDb().getHost()) != null)
+ new SimpleWaiter(() -> this.client().services().withName(getApplication().getKeycloak().getSpec().getDb().getHost())
+ .get() != null)
.level(Level.DEBUG).waitFor();
}
// create custom resources
keycloakClient().createOrReplace(getApplication().getKeycloak());
- if (getApplication().getKeycloakRealmImports().size() > 0) {
- getApplication().getKeycloakRealmImports().stream()
- .forEach((i) -> keycloakRealmImportClient().resource(i).create());
- }
+ getApplication().getKeycloakRealmImports().stream()
+ .forEach(ri -> keycloakRealmImportClient().createOrReplace(ri));
// Wait for Keycloak (and PostgreSQL) to be ready
waitFor(getApplication().getKeycloak());
@@ -182,9 +138,9 @@ public void deploy() {
// check that route is up, only if there's a valid external URL available
URL externalUrl = getURL();
if ((getApplication().getKeycloak().getSpec().getInstances() > 0) && (externalUrl != null)) {
- WaitersUtil.routeIsUp(externalUrl.toExternalForm())
- .level(Level.DEBUG)
- .waitFor();
+ new SimpleWaiter(
+ () -> Https.getCode(getURL().toExternalForm()) != 503)
+ .reason("Wait until the route is ready to serve.");
}
}
@@ -193,9 +149,12 @@ public void waitFor(Keycloak keycloak) {
if (replicas > 0) {
// wait for >= 1 pods with label controller-revision-hash=keycloak-d86bb6ddc
String controllerRevisionHash = getStatefulSet().getStatus().getUpdateRevision();
- OpenShiftWaiters.get(OpenShiftProvisioner.openShift, ffCheck)
- .areExactlyNPodsReady(replicas.intValue(), "controller-revision-hash",
- controllerRevisionHash)
+ BooleanSupplier bs = () -> getPods().stream()
+ .filter(p -> p.getMetadata().getLabels().get("controller-revision-hash") != null
+ && p.getMetadata().getLabels().get("controller-revision-hash").equals(controllerRevisionHash))
+ .collect(Collectors.toList()).size() == replicas.intValue();
+ new SimpleWaiter(bs, TimeUnit.MINUTES, 2,
+ "Waiting for pods with label \"controller-revision-hash\"=" + controllerRevisionHash + " to be scaled")
.waitFor();
}
}
@@ -237,39 +196,20 @@ private void waitForKeycloakResourceReadiness() {
.reason("Wait for KeycloakRealmImports to be done.").level(Level.DEBUG).waitFor();
}
- /**
- * Get a reference to keycloak object. Use get() to get the actual object, or null in case it does not
- * exist on tested cluster.
- * @return A concrete {@link Resource} instance representing the {@link org.jboss.intersmash.provision.openshift.operator.keycloak.keycloak.Keycloak} resource definition
- */
- public Resource<Keycloak> keycloak() {
- return keycloakClient()
- .withName(getApplication().getKeycloak().getMetadata().getName());
- }
-
- public List<KeycloakRealmImport> keycloakRealmImports() {
- return keycloakRealmImportClient().list().getItems()
- .stream().filter(
- realm -> getApplication().getKeycloakRealmImports().stream().map(
- ri -> ri.getMetadata().getName())
- .anyMatch(riName -> riName.equalsIgnoreCase(realm.getMetadata().getName())))
- .collect(Collectors.toList());
- }
-
/**
* @return the underlying StatefulSet which provisions the cluster
*/
- private StatefulSet getStatefulSet() {
- final String STATEFUL_SET_NAME = getApplication().getKeycloak().getMetadata().getName();
+ public StatefulSet getStatefulSet() {
+ final String statefulSetName = getApplication().getKeycloak().getMetadata().getName();
new SimpleWaiter(
- () -> Objects.nonNull(OpenShiftProvisioner.openShift.getStatefulSet(STATEFUL_SET_NAME)))
+ () -> Objects.nonNull(this.client().apps().statefulSets().withName(statefulSetName).get()))
.reason(
MessageFormat.format(
"Waiting for StatefulSet \"{0}\" to be created for Keycloak \"{1}\".",
- STATEFUL_SET_NAME,
+ statefulSetName,
getApplication().getKeycloak().getMetadata().getName()))
.level(Level.DEBUG).timeout(60000L).waitFor();
- return OpenShiftProvisioner.openShift.getStatefulSet(STATEFUL_SET_NAME);
+ return this.client().apps().statefulSets().withName(statefulSetName).get();
}
@Override
@@ -288,9 +228,17 @@ public void undeploy() {
.reason("Wait for Keycloak instances to be deleted.").level(Level.DEBUG).waitFor();
// wait for 0 pods
- OpenShiftWaiters.get(OpenShiftProvisioner.openShift, () -> false)
- .areExactlyNPodsReady(0, "app", getApplication().getKeycloak().getKind().toLowerCase()).level(Level.DEBUG)
+ BooleanSupplier bs = () -> this.client().pods().inNamespace(this.client().getNamespace()).list().getItems().stream()
+ .filter(p -> !com.google.common.base.Strings.isNullOrEmpty(p.getMetadata().getLabels().get("app"))
+ && p.getMetadata().getLabels().get("app")
+ .equals(getApplication().getKeycloak().getKind().toLowerCase()))
+ .collect(Collectors.toList()).isEmpty();
+ String reason = "Waiting for exactly 0 pods with label \"app\"="
+ + getApplication().getKeycloak().getKind().toLowerCase() + " to be ready.";
+ new SimpleWaiter(bs, TimeUnit.MINUTES, 2, reason)
+ .level(Level.DEBUG)
.waitFor();
+
unsubscribe();
}
@@ -302,8 +250,12 @@ public void scale(int replicas, boolean wait) {
tmpKeycloak.getSpec().setInstances(Integer.toUnsignedLong(replicas));
keycloak().replace(tmpKeycloak);
if (wait) {
- OpenShiftWaiters.get(OpenShiftProvisioner.openShift, ffCheck)
- .areExactlyNPodsReady(replicas, "controller-revision-hash", controllerRevisionHash)
+ BooleanSupplier bs = () -> getPods().stream()
+ .filter(p -> p.getMetadata().getLabels().get("controller-revision-hash") != null
+ && p.getMetadata().getLabels().get("controller-revision-hash").equals(controllerRevisionHash))
+ .collect(Collectors.toList()).size() == replicas;
+ new SimpleWaiter(bs, TimeUnit.MINUTES, 2,
+ "Waiting for pods with label \"controller-revision-hash\"=" + controllerRevisionHash + " to be scaled")
.level(Level.DEBUG)
.waitFor();
}
@@ -314,46 +266,99 @@ public void scale(int replicas, boolean wait) {
.reason("Wait for Keycloak resource to be ready").level(Level.DEBUG).waitFor();
// check that route is up
if (originalReplicas == 0 && replicas > 0) {
- WaitersUtil.routeIsUp(getURL().toExternalForm())
- .level(Level.DEBUG)
- .waitFor();
+ new SimpleWaiter(
+ () -> Https.getCode(getURL().toExternalForm()) != 503)
+ .reason("Wait until the route is ready to serve.");
}
}
- @Override
public List<Pod> getPods() {
- String STATEFUL_SET_NAME = getApplication().getKeycloak().getMetadata().getName();
- StatefulSet statefulSet = OpenShiftProvisioner.openShift.getStatefulSet(STATEFUL_SET_NAME);
+ final String statefulSetName = getApplication().getKeycloak().getMetadata().getName();
+ StatefulSet statefulSet = this.client().apps().statefulSets().withName(statefulSetName).get();
return Objects.nonNull(statefulSet)
- ? OpenShiftProvisioner.openShift.getLabeledPods("controller-revision-hash",
- statefulSet.getStatus().getUpdateRevision())
+ ? this.client().pods().inNamespace(this.client().getNamespace()).list().getItems().stream()
+ .filter(p -> p.getMetadata().getLabels().get("controller-revision-hash") != null
+ && p.getMetadata().getLabels().get("controller-revision-hash")
+ .equals(statefulSet.getStatus().getUpdateRevision()))
+ .collect(Collectors.toList())
: Lists.emptyList();
}
- @Override
- public URL getURL() {
- String host = OpenShiftProvisioner.openShift.routes().list().getItems()
- .stream().filter(
- route -> route.getMetadata().getName().startsWith(
- keycloak().get().getMetadata().getName())
- &&
- route.getMetadata().getLabels().entrySet()
- .stream().filter(
- label -> label.getKey().equalsIgnoreCase("app.kubernetes.io/instance")
- &&
- label.getValue().equalsIgnoreCase(
- keycloak().get().getMetadata().getLabels()
- .get("app")))
- .count() == 1
-
- ).findFirst()
- .orElseThrow(() -> new RuntimeException(
- String.format("No route for Keycloak %s!", keycloak().get().getMetadata().getName())))
- .getSpec().getHost();
- try {
- return Strings.isNullOrEmpty(host) ? null : new URL(String.format("https://%s", host));
- } catch (MalformedURLException e) {
- throw new RuntimeException(String.format("Keycloak operator External URL \"%s\" is malformed.", host), e);
+ // =================================================================================================================
+ // Client related
+ // =================================================================================================================
+ // this is the packagemanifest for the operator;
+ // you can get it with the command:
+ // oc get packagemanifest -o template --template='{{ .metadata.name }}'
+ public static String OPERATOR_ID = IntersmashConfig.keycloakOperatorPackageManifest();
+ // this is the name of the CustomResourceDefinition(s)
+ // you can get it with the command:
+ // oc get crd <CRD name> -o template --template='{{ .metadata.name }}'
+ public String KEYCLOAK_CRD_NAME = "keycloaks.k8s.keycloak.org";
+
+ public String KEYCLOAK_REALM_IMPORT_CRD_NAME = "keycloakrealmimports.k8s.keycloak.org";
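+ // For example, once the operator is installed, the Keycloak CRD name above can be checked with:
+ //   oc get crd keycloaks.k8s.keycloak.org -o template --template='{{ .metadata.name }}'
+ // which should print "keycloaks.k8s.keycloak.org"; the same applies to the realm import CRD.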
+
+ /**
+ * Generic CRD client which is used by the client builders' default implementation to build the CRD clients
+ *
+ * @return A {@link NonNamespaceOperation} instance that represents a client for {@link CustomResourceDefinition} resources
+ */
+ public abstract NonNamespaceOperation<CustomResourceDefinition, CustomResourceDefinitionList, Resource<CustomResourceDefinition>> customResourceDefinitionsClient();
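+ // A concrete implementation would typically delegate to a cluster-admin client, e.g. (sketch mirroring
+ // the code removed from this class):
+ //   return OpenShifts.admin().apiextensions().v1().customResourceDefinitions();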
+
+ private static NonNamespaceOperation> KEYCLOAKS_CLIENT;
+ private static NonNamespaceOperation> KEYCLOAK_REALM_IMPORTS_CLIENT;
+
+ // keycloaks.k8s.keycloak.org
+ protected abstract HasMetadataOperationsImpl keycloaksCustomResourcesClient(
+ CustomResourceDefinitionContext crdc);
+
+ // keycloakrealmimports.k8s.keycloak.org
+ protected abstract HasMetadataOperationsImpl keycloakRealmImportsCustomResourcesClient(
+ CustomResourceDefinitionContext crdc);
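+ // Possible implementations of the two factory methods above, mirroring the helper calls removed from
+ // this class (OpenShifts.master() is assumed to be available to the concrete subclass):
+ //   return OpenShifts.master().newHasMetadataOperation(crdc, Keycloak.class, KeycloakOperatorKeycloakList.class);
+ //   return OpenShifts.master().newHasMetadataOperation(crdc, KeycloakRealmImport.class, KeycloakOperatorRealmImportList.class);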
+
+ public NonNamespaceOperation