diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..5598e88 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,7 @@ +/target/ +.idea +*.iml +*.ipr +.classpath +.settings +.project \ No newline at end of file diff --git a/.gitignore b/.gitignore index a1c2a23..63a41f0 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,11 @@ # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml hs_err_pid* + +/target/ +.idea +*.iml +*.ipr +.classpath +.settings +.project diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..e2da118 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,21 @@ +language: java +dist: xenial + +services: + - docker + +branches: + only: + - master + +install: + - set -e + - nohup docker pull localstack/localstack > /dev/null & + - nohup docker pull lambci/lambda:java8 > /dev/null & + +script: + - set -e + - make test + +notifications: + email: false diff --git a/LICENSE b/LICENSE.txt similarity index 100% rename from LICENSE rename to LICENSE.txt diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..d6d15d9 --- /dev/null +++ b/Makefile @@ -0,0 +1,15 @@ +ADDITIONAL_MVN_ARGS ?= -DskipTests -q + +usage: ## Show this help + @fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##//' + +build: ## Build the code using Maven + mvn -Pfatjar $(ADDITIONAL_MVN_ARGS) clean javadoc:jar source:jar package $(ADDITIONAL_MVN_TARGETS) + +publish-maven: ## Publish artifacts to Maven Central + ADDITIONAL_MVN_TARGETS=deploy ADDITIONAL_MVN_ARGS=" " make build + +test: ## Run tests for Java/JUnit compatibility + USE_SSL=1 SERVICES=serverless,kinesis,sns,sqs,cloudwatch mvn $(MVN_ARGS) -q test + +.PHONY: usage clean install test diff --git a/README.md b/README.md index 6afd573..7cbcd68 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,26 @@ # localstack-java-client -Java utilities and JUnit integration for LocalStack + +Java utilities and JUnit integration for [LocalStack](https://github.com/localstack/localstack). + +## Usage + +For more details, please refer to the README of the main LocalStack repo: https://github.com/localstack/localstack + +## Prerequisites + +* Java +* Maven +* LocalStack + +## Building + +To build the latest version of the code via Maven: +``` +make build +``` + +## License + +Copyright (c) 2017-2020 LocalStack maintainers and contributors. + +This version of LocalStack is released under the Apache License, Version 2.0 (see LICENSE.txt). diff --git a/pom.xml b/pom.xml new file mode 100644 index 0000000..968d4ba --- /dev/null +++ b/pom.xml @@ -0,0 +1,322 @@ + + 4.0.0 + + cloud.localstack + localstack-utils + jar + 0.2.0 + localstack-utils + + Java utilities for the LocalStack platform. 
+ http://localstack.cloud + + + whummer + Waldemar Hummer + + + + + Apache License 2.0 + https://www.apache.org/licenses/LICENSE-2.0 + + + + https://github.com/localstack/localstack + + + + UTF-8 + UTF-8 + 1.8 + 1.8 + 1.11.642 + + + + + junit + junit + 4.12 + true + + + org.junit.jupiter + junit-jupiter-api + 5.5.2 + true + + + org.apache.commons + commons-lang3 + 3.5 + + + net.java.dev.jna + jna + 4.1.0 + compile + + + org.jvnet.winp + winp + 1.23 + + + org.projectlombok + lombok + 1.18.4 + provided + + + + javax.xml.bind + jaxb-api + 2.3.1 + provided + + + commons-logging + commons-logging + 1.2 + + + + + com.amazonaws + aws-java-sdk + ${aws.sdk.version} + provided + + + com.amazonaws + aws-lambda-java-core + 1.2.0 + + + com.amazonaws + aws-lambda-java-events + 2.2.7 + + + com.amazonaws + aws-java-sdk-core + ${aws.sdk.version} + + + com.amazonaws + aws-java-sdk-lambda + ${aws.sdk.version} + + + com.amazonaws + aws-java-sdk-sqs + ${aws.sdk.version} + + + + + + com.amazonaws + amazon-sqs-java-messaging-lib + 1.0.5 + jar + test + + + org.testcontainers + testcontainers + 1.12.5 + test + + + commons-io + commons-io + 2.6 + test + + + ch.qos.logback + logback-classic + 1.0.13 + test + + + org.assertj + assertj-core + 3.9.0 + test + + + + + + fatjar + + + com.amazonaws + aws-java-sdk + 1.11.505 + + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.8.1 + + + org.apache.maven.plugins + maven-javadoc-plugin + 3.0.1 + + none + + + + org.apache.maven.plugins + maven-shade-plugin + 3.1.1 + + + package + + shade + + + + + false + false + true + fat + + + com.amazonaws:aws-java-sdk-core + com.amazonaws:aws-java-sdk-kinesis + com.amazonaws:aws-java-sdk-lambda + com.amazonaws:aws-java-sdk-sqs + com.amazonaws:aws-java-sdk-s3 + com.amazonaws:aws-lambda-java-events + com.amazonaws:aws-lambda-java-core + commons-*:* + net.*:* + org.apache.*:* + org.jvnet.*:* + org.apiguardian:* + org.opentest4j:* + com.fasterxml.*:* + joda-time:* + com.jayway.*:* + software.*:* + + + + + + + org.joda + com.amazonaws.thirdparty.joda + + + com.fasterxml.jackson + com.amazonaws.thirdparty.jackson + + + org.apache.http + com.amazonaws.thirdparty.apache.http + + + org.apache.commons + com.amazonaws.thirdparty.apache.commons + + + software.amazon.ion + com.amazonaws.thirdparty.ion + + + + + + org.apache.maven.plugins + maven-jar-plugin + 3.0.2 + + + + test-jar + + + + + + org.apache.maven.plugins + maven-source-plugin + 3.0.1 + + + org.apache.maven.plugins + maven-gpg-plugin + 1.5 + + + sign-artifacts + verify + + sign + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.7 + true + + ossrh + https://oss.sonatype.org/ + true + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.21.0 + + + org.apache.maven.surefire + surefire-junit4 + 2.21.0 + + + org.junit.platform + junit-platform-surefire-provider + 1.2.0 + + + org.junit.jupiter + junit-jupiter-engine + 5.5.2 + + + + + + + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + + ossrh + https://oss.sonatype.org/service/local/staging/deploy/maven2/ + + + diff --git a/src/main/java/cloud/localstack/Constants.java b/src/main/java/cloud/localstack/Constants.java new file mode 100644 index 0000000..aaaabdd --- /dev/null +++ b/src/main/java/cloud/localstack/Constants.java @@ -0,0 +1,35 @@ +package cloud.localstack; + +import java.util.HashMap; +import java.util.Map; + +public class Constants { + public static final Map DEFAULT_PORTS = new HashMap<>(); + + static { + DEFAULT_PORTS.put("apigateway", 4567); + 
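As an aside on the port table being populated in this static block (the remaining entries follow below): these are the fixed default service ports of older LocalStack releases, and the deprecated `Localstack.getDefaultEndpoint` later in this diff combines such a port with a host and protocol. A minimal sketch of that combination, assuming `DEFAULT_PORTS` is a `Map<String, Integer>`; the helper class and method names here are illustrative only, not part of this PR:

```java
import cloud.localstack.Constants;

// Sketch only: turn a default service port into an endpoint URL,
// mirroring what the deprecated Localstack.getDefaultEndpoint does further down.
public class DefaultEndpointSketch {

    static String defaultEndpointFor(String service) {
        Integer port = Constants.DEFAULT_PORTS.get(service); // e.g. "sqs" -> 4576
        if (port == null) {
            throw new IllegalArgumentException("No default port known for service: " + service);
        }
        return "http://localhost:" + port; // USE_SSL / LOCALSTACK_HOSTNAME handling omitted
    }

    public static void main(String[] args) {
        System.out.println(defaultEndpointFor("sqs")); // http://localhost:4576
    }
}
```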
DEFAULT_PORTS.put("kinesis", 4568); + DEFAULT_PORTS.put("dynamodb", 4569); + DEFAULT_PORTS.put("dynamodbstreams", 4570); + DEFAULT_PORTS.put("s3", 4572); + DEFAULT_PORTS.put("firehose", 4573); + DEFAULT_PORTS.put("lambda", 4574); + DEFAULT_PORTS.put("sns", 4575); + DEFAULT_PORTS.put("sqs", 4576); + DEFAULT_PORTS.put("redshift", 4577); + DEFAULT_PORTS.put("es", 4578); + DEFAULT_PORTS.put("ses", 4579); + DEFAULT_PORTS.put("route53", 4580); + DEFAULT_PORTS.put("cloudformation", 4581); + DEFAULT_PORTS.put("cloudwatch", 4582); + DEFAULT_PORTS.put("ssm", 4583); + DEFAULT_PORTS.put("secretsmanager", 4584); + DEFAULT_PORTS.put("stepfunctions", 4585); + DEFAULT_PORTS.put("logs", 4586); + DEFAULT_PORTS.put("events", 4587); + DEFAULT_PORTS.put("sts", 4592); + DEFAULT_PORTS.put("iam", 4593); + DEFAULT_PORTS.put("ec2", 4597); + DEFAULT_PORTS.put("kms", 4599); + } +} diff --git a/src/main/java/cloud/localstack/LambdaContext.java b/src/main/java/cloud/localstack/LambdaContext.java new file mode 100644 index 0000000..20940c3 --- /dev/null +++ b/src/main/java/cloud/localstack/LambdaContext.java @@ -0,0 +1,80 @@ +package cloud.localstack; + +import com.amazonaws.services.lambda.runtime.ClientContext; +import com.amazonaws.services.lambda.runtime.CognitoIdentity; +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.LambdaLogger; + +import java.util.logging.Level; +import java.util.logging.Logger; + +public class LambdaContext implements Context { + + private final Logger LOG = Logger.getLogger(LambdaContext.class.getName()); + + public LambdaLogger getLogger() { + return new LambdaLogger() { + + @Override + public void log(String msg) { + LOG.log(Level.INFO, msg); + } + + @Override + public void log(byte[] msg) { + log(new String(msg)); + } + }; + } + + public String getAwsRequestId() { + // TODO Auto-generated method stub + return null; + } + + public ClientContext getClientContext() { + // TODO Auto-generated method stub + return null; + } + + public String getFunctionName() { + // TODO Auto-generated method stub + return null; + } + + public String getFunctionVersion() { + // TODO Auto-generated method stub + return null; + } + + public CognitoIdentity getIdentity() { + // TODO Auto-generated method stub + return null; + } + + public String getInvokedFunctionArn() { + // TODO Auto-generated method stub + return null; + } + + public String getLogGroupName() { + // TODO Auto-generated method stub + return null; + } + + public String getLogStreamName() { + // TODO Auto-generated method stub + return null; + } + + public int getMemoryLimitInMB() { + // TODO Auto-generated method stub + return 0; + } + + public int getRemainingTimeInMillis() { + // TODO Auto-generated method stub + return 0; + } + +} diff --git a/src/main/java/cloud/localstack/LambdaExecutor.java b/src/main/java/cloud/localstack/LambdaExecutor.java new file mode 100644 index 0000000..4d5e2a7 --- /dev/null +++ b/src/main/java/cloud/localstack/LambdaExecutor.java @@ -0,0 +1,174 @@ +package cloud.localstack; + +import cloud.localstack.lambda.DDBEventParser; +import cloud.localstack.lambda.S3EventParser; +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import com.amazonaws.services.lambda.runtime.RequestStreamHandler; +import com.amazonaws.services.lambda.runtime.events.KinesisEvent; +import com.amazonaws.services.lambda.runtime.events.KinesisEvent.KinesisEventRecord; +import 
com.amazonaws.services.lambda.runtime.events.KinesisEvent.Record; +import com.amazonaws.services.lambda.runtime.events.SNSEvent; +import com.amazonaws.services.lambda.runtime.events.SQSEvent; +import com.amazonaws.util.StringInputStream; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.MapperFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.commons.lang3.StringUtils; +import org.joda.time.DateTime; + +import java.io.ByteArrayOutputStream; +import java.io.OutputStream; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.Base64; +import java.util.Date; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * Simple implementation of a Java Lambda function executor. + * + * @author Waldemar Hummer + */ +@SuppressWarnings("restriction") +public class LambdaExecutor { + + @SuppressWarnings("unchecked") + public static void main(String[] args) throws Exception { + if(args.length < 2) { + System.err.println("Usage: java " + LambdaExecutor.class.getSimpleName() + + " "); + System.exit(1); + } + + String fileContent = readFile(args[1]); + ObjectMapper reader = new ObjectMapper(); + reader.configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true); + reader.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + Map map = reader.readerFor(Map.class).readValue(fileContent); + + List> records = (List>) get(map, "Records"); + Object inputObject = map; + + Object handler = getHandler(args[0]); + if (records == null) { + Optional deserialisedInput = getInputObject(reader, fileContent, handler); + if (deserialisedInput.isPresent()) { + inputObject = deserialisedInput.get(); + } + } else { + if (records.stream().anyMatch(record -> record.containsKey("kinesis") || record.containsKey("Kinesis"))) { + KinesisEvent kinesisEvent = new KinesisEvent(); + inputObject = kinesisEvent; + kinesisEvent.setRecords(new LinkedList<>()); + for (Map record : records) { + KinesisEventRecord r = new KinesisEventRecord(); + kinesisEvent.getRecords().add(r); + Record kinesisRecord = new Record(); + Map kinesis = (Map) get(record, "Kinesis"); + String dataString = new String(get(kinesis, "Data").toString().getBytes()); + byte[] decodedData = Base64.getDecoder().decode(dataString); + kinesisRecord.setData(ByteBuffer.wrap(decodedData)); + kinesisRecord.setPartitionKey((String) get(kinesis, "PartitionKey")); + kinesisRecord.setApproximateArrivalTimestamp(new Date()); + r.setKinesis(kinesisRecord); + } + } else if (records.stream().anyMatch(record -> record.containsKey("Sns"))) { + SNSEvent snsEvent = new SNSEvent(); + inputObject = snsEvent; + snsEvent.setRecords(new LinkedList<>()); + for (Map record : records) { + SNSEvent.SNSRecord r = new SNSEvent.SNSRecord(); + snsEvent.getRecords().add(r); + SNSEvent.SNS snsRecord = new SNSEvent.SNS(); + Map sns = (Map) get(record, "Sns"); + snsRecord.setMessage((String) get(sns, "Message")); + snsRecord.setMessageAttributes((Map) get(sns, "MessageAttributes")); + snsRecord.setType("Notification"); + snsRecord.setTimestamp(new DateTime()); + r.setSns(snsRecord); + } + } else if 
(records.stream().filter(record -> record.containsKey("dynamodb")).count() > 0) { + inputObject = DDBEventParser.parse(records); + } else if (records.stream().anyMatch(record -> record.containsKey("s3"))) { + inputObject = S3EventParser.parse(records); + } else if (records.stream().anyMatch(record -> record.containsKey("sqs"))) { + inputObject = reader.readValue(fileContent, SQSEvent.class); + } + } + + Context ctx = new LambdaContext(); + if (handler instanceof RequestHandler) { + Object result = ((RequestHandler) handler).handleRequest(inputObject, ctx); + // try turning the output into json + try { + result = new ObjectMapper().writeValueAsString(result); + } catch (JsonProcessingException jsonException) { + // continue with results as it is + } + // The contract with lambci is to print the result to stdout, whereas logs go to stderr + System.out.println(result); + } else if (handler instanceof RequestStreamHandler) { + OutputStream os = new ByteArrayOutputStream(); + ((RequestStreamHandler) handler).handleRequest( + new StringInputStream(fileContent), os, ctx); + System.out.println(os); + } + } + + private static Optional getInputObject(ObjectMapper mapper, String objectString, Object handler) { + Optional inputObject = Optional.empty(); + try { + Optional handlerInterface = Arrays.stream(handler.getClass().getGenericInterfaces()) + .filter(genericInterface -> + ((ParameterizedType) genericInterface).getRawType().equals(RequestHandler.class)) + .findFirst(); + if (handlerInterface.isPresent()) { + Class handlerInputType = Class.forName(((ParameterizedType) handlerInterface.get()) + .getActualTypeArguments()[0].getTypeName()); + inputObject = Optional.of(mapper.readerFor(handlerInputType).readValue(objectString)); + } + } catch (Exception genericException) { + // do nothing + } + return inputObject; + } + + private static Object getHandler(String handlerName) throws NoSuchMethodException, IllegalAccessException, + InvocationTargetException, InstantiationException, ClassNotFoundException { + Class clazz = Class.forName(handlerName); + return clazz.getConstructor().newInstance(); + } + + public static T get(Map map, String key) { + T result = map.get(key); + if(result != null) { + return result; + } + key = StringUtils.uncapitalize(key); + result = map.get(key); + if(result != null) { + return result; + } + return map.get(key.toLowerCase()); + } + + public static String readFile(String file) throws Exception { + if(!file.startsWith("/")) { + file = System.getProperty("user.dir") + "/" + file; + } + return Files.lines(Paths.get(file), StandardCharsets.UTF_8).collect(Collectors.joining()); + } + +} diff --git a/src/main/java/cloud/localstack/Localstack.java b/src/main/java/cloud/localstack/Localstack.java new file mode 100644 index 0000000..afc6f98 --- /dev/null +++ b/src/main/java/cloud/localstack/Localstack.java @@ -0,0 +1,235 @@ +package cloud.localstack; + +import cloud.localstack.ServiceName; +import cloud.localstack.docker.*; +import cloud.localstack.docker.command.*; +import cloud.localstack.docker.annotation.LocalstackDockerConfiguration; +import cloud.localstack.docker.exception.LocalstackDockerException; +import lombok.Getter; +import lombok.Setter; + +import java.util.*; +import java.util.logging.Logger; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +/** + * Localstack Docker instance + * + * @author Alan Bevier + * @author fabianoo + */ +public class Localstack { + + private static final Logger LOG = Logger.getLogger(Localstack.class.getName()); + + 
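Stepping back to the `LambdaExecutor` above: it takes two arguments, a handler class name and an event file, deserializes the file into the matching event type (Kinesis, SNS, DynamoDB, S3, SQS, or the handler's own input type), and invokes the handler, printing the result to stdout per the lambci contract. A hedged sketch of a handler it could drive; the package, class, and event file names are illustrative only:

```java
package example; // hypothetical package, not part of this PR

import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestHandler;
import com.amazonaws.services.lambda.runtime.events.SNSEvent;

// A handler with a no-arg constructor, which is what LambdaExecutor.getHandler expects.
public class SnsToStringHandler implements RequestHandler<SNSEvent, String> {

    @Override
    public String handleRequest(SNSEvent event, Context context) {
        context.getLogger().log("received " + event.getRecords().size() + " record(s)");
        // The executor serializes this return value to JSON and prints it to stdout.
        return event.getRecords().get(0).getSNS().getMessage();
    }
}

// Invoked (conceptually) as:
//   java cloud.localstack.LambdaExecutor example.SnsToStringHandler event.json
```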
private static final String PORT_CONFIG_FILENAME = "/opt/code/localstack/" + + ".venv/lib/python3.8/site-packages/localstack_client/config.py"; + + private static final Pattern READY_TOKEN = Pattern.compile("Ready\\."); + + //Regular expression used to parse localstack config to determine default ports for services + private static final Pattern DEFAULT_PORT_PATTERN = Pattern.compile("'(\\w+)'\\Q: '{proto}://{host}:\\E(\\d+)'"); + + private static final int SERVICE_NAME_GROUP = 1; + + private static final int PORT_GROUP = 2; + + public static final String ENV_CONFIG_USE_SSL = "USE_SSL"; + + private Container localStackContainer; + + /** + * This is a mapping from service name to internal ports. In order to use them, the + * internal port must be resolved to an external docker port via Container.getExternalPortFor() + */ + private static Map serviceToPortMap; + + private static boolean locked = false; + + public static final Localstack INSTANCE = new Localstack(); + + private String externalHostName; + + static { + // make sure we avoid any errors related to locally generated SSL certificates + TestUtils.disableSslCertChecking(); + } + + private Localstack() { } + + public void startup(LocalstackDockerConfiguration dockerConfiguration) { + if (locked) { + throw new IllegalStateException("A docker instance is starting or already started."); + } + locked = true; + this.externalHostName = dockerConfiguration.getExternalHostName(); + + try { + localStackContainer = Container.createLocalstackContainer( + dockerConfiguration.getExternalHostName(), + dockerConfiguration.isPullNewImage(), + dockerConfiguration.isRandomizePorts(), + dockerConfiguration.getImageTag(), + dockerConfiguration.getEnvironmentVariables(), + dockerConfiguration.getPortMappings() + ); + loadServiceToPortMap(); + + LOG.info("Waiting for LocalStack container to be ready..."); + localStackContainer.waitForLogToken(READY_TOKEN); + } catch (Exception t) { + if (t.toString().contains("port is already allocated") && dockerConfiguration.isIgnoreDockerRunErrors()) { + LOG.info("Ignoring port conflict when starting Docker container, due to ignoreDockerRunErrors=true"); + localStackContainer = Container.getRunningLocalstackContainer(); + loadServiceToPortMap(); + return; + } + this.stop(); + throw new LocalstackDockerException("Could not start the localstack docker container.", t); + } + } + + public void stop() { + if (this.localStackContainer != null) { + localStackContainer.stop(); + } + locked = false; + } + + private void loadServiceToPortMap() { + String localStackPortConfig = localStackContainer.executeCommand(Arrays.asList("cat", PORT_CONFIG_FILENAME)); + + Map ports = new RegexStream(DEFAULT_PORT_PATTERN.matcher(localStackPortConfig)).stream() + .collect(Collectors.toMap(match -> match.group(SERVICE_NAME_GROUP), + match -> Integer.parseInt(match.group(PORT_GROUP)))); + + serviceToPortMap = Collections.unmodifiableMap(ports); + } + + public String getEndpointS3() { + String s3Endpoint = endpointForService(ServiceName.S3); + /* + * Use the domain name wildcard *.localhost.atlassian.io which maps to 127.0.0.1 + * We need to do this because S3 SDKs attempt to access a domain . 
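Before the endpoint accessors that follow, a note on the lifecycle defined above: `startup` boots the container and parses the service-to-port mapping out of the container's `localstack_client` config file, and `stop` tears the container down. Outside the JUnit runner this can be driven manually; a sketch, assuming a default `LocalstackDockerConfiguration` instance (e.g. a `DEFAULT` constant) is available from the docker annotation package, which is not part of this diff:

```java
import cloud.localstack.Localstack;
import cloud.localstack.ServiceName;
import cloud.localstack.docker.annotation.LocalstackDockerConfiguration;

// Sketch only: manual startup/shutdown around the singleton, without the test runner.
public class ManualLifecycleSketch {

    public static void main(String[] args) {
        Localstack.INSTANCE.startup(LocalstackDockerConfiguration.DEFAULT); // assumed default config
        try {
            // Internal service ports are resolved to the externally mapped Docker ports.
            String sqs = Localstack.INSTANCE.getEndpointSQS();
            String kinesis = Localstack.INSTANCE.endpointForService(ServiceName.KINESIS);
            System.out.println(sqs + " / " + kinesis);
        } finally {
            Localstack.INSTANCE.stop();
        }
    }
}
```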
+ * which by default would result in .localhost, but that name cannot be resolved + * (unless hardcoded in /etc/hosts) + */ + s3Endpoint = s3Endpoint.replace("localhost", "test.localhost.atlassian.io"); + return s3Endpoint; + } + + public String getEndpointKinesis() { + return endpointForService(ServiceName.KINESIS); + } + + public String getEndpointLambda() { + return endpointForService(ServiceName.LAMBDA); + } + + public String getEndpointDynamoDB() { + return endpointForService(ServiceName.DYNAMO); + } + + public String getEndpointDynamoDBStreams() { + return endpointForService(ServiceName.DYNAMO_STREAMS); + } + + public String getEndpointAPIGateway() { + return endpointForService(ServiceName.API_GATEWAY); + } + + public String getEndpointElasticsearch() { + return endpointForService(ServiceName.ELASTICSEARCH); + } + + public String getEndpointElasticsearchService() { + return endpointForService(ServiceName.ELASTICSEARCH_SERVICE); + } + + public String getEndpointFirehose() { + return endpointForService(ServiceName.FIREHOSE); + } + + public String getEndpointSNS() { + return endpointForService(ServiceName.SNS); + } + + public String getEndpointSQS() { + return endpointForService(ServiceName.SQS); + } + + public String getEndpointRedshift() { + return endpointForService(ServiceName.REDSHIFT); + } + + public String getEndpointCloudWatch() { + return endpointForService(ServiceName.CLOUDWATCH); + } + + public String getEndpointSES() { + return endpointForService(ServiceName.SES); + } + + public String getEndpointRoute53() { + return endpointForService(ServiceName.ROUTE53); + } + + public String getEndpointCloudFormation() { + return endpointForService(ServiceName.CLOUDFORMATION); + } + + public String getEndpointSSM() { + return endpointForService(ServiceName.SSM); + } + + public String getEndpointSecretsmanager() { + return endpointForService(ServiceName.SECRETSMANAGER); + } + + public String getEndpointEC2() { + return endpointForService(ServiceName.EC2); + } + + public String getEndpointStepFunctions() { return endpointForService(ServiceName.STEPFUNCTIONS); } + + public String endpointForService(String serviceName) { + if (serviceToPortMap == null) { + throw new IllegalStateException("Service to port mapping has not been determined yet."); + } + + if (!serviceToPortMap.containsKey(serviceName)) { + throw new IllegalArgumentException("Unknown port mapping for service: " + serviceName); + } + + int internalPort = serviceToPortMap.get(serviceName); + return endpointForPort(internalPort); + } + + public String endpointForPort(int port) { + if (localStackContainer != null) { + int externalPort = localStackContainer.getExternalPortFor(port); + String protocol = useSSL() ? 
"https" : "http"; + return String.format("%s://%s:%s", protocol, externalHostName, externalPort); + } + + throw new RuntimeException("Container not started"); + } + + public Container getLocalStackContainer() { + return localStackContainer; + } + + public static boolean useSSL() { + return isEnvConfigSet(ENV_CONFIG_USE_SSL); + } + + public static boolean isEnvConfigSet(String configName) { + String value = System.getenv(configName); + return value != null && !Arrays.asList("false", "0", "").contains(value.trim()); + } + + public static String getDefaultRegion() { + return TestUtils.DEFAULT_REGION; + } +} diff --git a/src/main/java/cloud/localstack/LocalstackTestRunner.java b/src/main/java/cloud/localstack/LocalstackTestRunner.java new file mode 100644 index 0000000..8ef26b8 --- /dev/null +++ b/src/main/java/cloud/localstack/LocalstackTestRunner.java @@ -0,0 +1,48 @@ +package cloud.localstack; + +import cloud.localstack.docker.annotation.LocalstackDockerAnnotationProcessor; +import cloud.localstack.docker.annotation.LocalstackDockerConfiguration; +import org.junit.runner.notification.RunNotifier; +import org.junit.runners.BlockJUnit4ClassRunner; +import org.junit.runners.model.InitializationError; + +/** + * JUnit test runner that automatically pulls and runs the latest localstack docker image + * and then terminates when tests are complete. + * + * Having docker installed is a prerequisite for this test runner to execute. If docker + * is not installed in one of the default locations (C:\program files\docker\docker\resources\bin\, usr/local/bin or + * usr/bin) + * then use the DOCKER_LOCATION environment variable to specify the location. + * + * Since ports are dynamically allocated, the external port needs to be resolved based on the default localstack port. + * + * The hostname defaults to localhost, but in some environments that is not sufficient, so the HostName can be specified + * by using the LocalstackDockerProperties annotation with an IHostNameResolver. 
+ * + * @author Alan Bevier + * @author Patrick Allain + * @author Waldemar Hummer + */ +public class LocalstackTestRunner extends BlockJUnit4ClassRunner { + + private static final LocalstackDockerAnnotationProcessor PROCESSOR = new LocalstackDockerAnnotationProcessor(); + + private Localstack localstackDocker = Localstack.INSTANCE; + + public LocalstackTestRunner(Class klass) throws InitializationError { + super(klass); + } + + @Override + public void run(RunNotifier notifier) { + try { + final LocalstackDockerConfiguration dockerConfig = PROCESSOR.process(this.getTestClass().getJavaClass()); + localstackDocker.startup(dockerConfig); + super.run(notifier); + } finally { + localstackDocker.stop(); + } + } + +} \ No newline at end of file diff --git a/src/main/java/cloud/localstack/ServiceName.java b/src/main/java/cloud/localstack/ServiceName.java new file mode 100644 index 0000000..b60bbc5 --- /dev/null +++ b/src/main/java/cloud/localstack/ServiceName.java @@ -0,0 +1,25 @@ +package cloud.localstack; + +public class ServiceName { + public static final String API_GATEWAY = "apigateway"; + public static final String KINESIS = "kinesis"; + public static final String DYNAMO = "dynamodb"; + public static final String DYNAMO_STREAMS = "dynamodbstreams"; + public static final String ELASTICSEARCH = "elasticsearch"; + public static final String S3 = "s3"; + public static final String FIREHOSE = "firehose"; + public static final String LAMBDA = "lambda"; + public static final String SNS = "sns"; + public static final String SQS = "sqs"; + public static final String REDSHIFT = "redshift"; + public static final String ELASTICSEARCH_SERVICE = "es"; + public static final String SES = "ses"; + public static final String ROUTE53 = "route53"; + public static final String CLOUDFORMATION = "cloudformation"; + public static final String CLOUDWATCH = "cloudwatch"; + public static final String SSM = "ssm"; + public static final String SECRETSMANAGER = "secretsmanager"; + public static final String STEPFUNCTIONS = "stepfunctions"; + public static final String EC2 = "ec2"; + +} diff --git a/src/main/java/cloud/localstack/TestUtils.java b/src/main/java/cloud/localstack/TestUtils.java new file mode 100644 index 0000000..fc9c1d8 --- /dev/null +++ b/src/main/java/cloud/localstack/TestUtils.java @@ -0,0 +1,301 @@ +package cloud.localstack; + +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.client.builder.ExecutorFactory; +import com.amazonaws.services.cloudwatch.AmazonCloudWatch; +import com.amazonaws.services.cloudwatch.AmazonCloudWatchClientBuilder; +import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; +import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder; +import com.amazonaws.services.dynamodbv2.AmazonDynamoDBStreams; +import com.amazonaws.services.dynamodbv2.AmazonDynamoDBStreamsClientBuilder; +import com.amazonaws.services.kinesis.AmazonKinesis; +import com.amazonaws.services.kinesis.AmazonKinesisAsync; +import com.amazonaws.services.kinesis.AmazonKinesisAsyncClientBuilder; +import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder; +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.AWSLambdaAsync; +import com.amazonaws.services.lambda.AWSLambdaAsyncClientBuilder; +import com.amazonaws.services.lambda.AWSLambdaClientBuilder; 
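With the runner and the `ServiceName` constants in place, a typical JUnit 4 test just declares the runner and uses the client factories from `TestUtils` (defined next). A hedged sketch; per-class container options would go on the `@LocalstackDockerProperties` annotation mentioned in the runner's Javadoc, which is omitted here:

```java
import cloud.localstack.LocalstackTestRunner;
import cloud.localstack.TestUtils;
import com.amazonaws.services.sqs.AmazonSQS;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;

// Sketch of a test driven by the runner added in this PR: the runner starts the
// LocalStack container before the tests run and stops it afterwards.
@RunWith(LocalstackTestRunner.class)
public class SqsSmokeTest {

    @Test
    public void createsAndListsQueue() {
        AmazonSQS sqs = TestUtils.getClientSQS(); // endpoint resolved from the running container
        sqs.createQueue("test-queue");
        Assert.assertFalse(sqs.listQueues().getQueueUrls().isEmpty());
    }
}
```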
+import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.sns.AmazonSNS; +import com.amazonaws.services.sns.AmazonSNSAsync; +import com.amazonaws.services.sns.AmazonSNSAsyncClientBuilder; +import com.amazonaws.services.sns.AmazonSNSClientBuilder; +import com.amazonaws.services.sqs.*; +import com.amazonaws.services.secretsmanager.AWSSecretsManager; +import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.AmazonSQSClientBuilder; +import java.io.IOException; +import java.lang.reflect.Field; +import java.nio.channels.FileChannel; +import java.nio.file.CopyOption; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Stream; + +@SuppressWarnings("all") +public class TestUtils { + + public static final String DEFAULT_REGION = "us-east-1"; + public static final String TEST_ACCESS_KEY = "test"; + public static final String TEST_SECRET_KEY = "test"; + public static final AWSCredentials TEST_CREDENTIALS = new BasicAWSCredentials(TEST_ACCESS_KEY, TEST_SECRET_KEY); + + private static final String[] EXCLUDED_DIRECTORIES = { + ".github", ".git", ".idea", ".venv", "target", "node_modules" + }; + + public static void setEnv(String key, String value) { + Map newEnv = new HashMap(System.getenv()); + newEnv.put(key, value); + setEnv(newEnv); + } + + public static AmazonSQS getClientSQS() { + return getClientSQS(null); + } + + public static AmazonSQS getClientSQS(String endpoint) { + endpoint = endpoint == null ? Localstack.INSTANCE.getEndpointSQS() : endpoint; + return AmazonSQSClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfiguration(endpoint)). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonSQSAsync getClientSQSAsync() { + return AmazonSQSAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSQS()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonSQSAsync getClientSQSAsync(final ExecutorFactory executorFactory) { + return AmazonSQSAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSQS()). + withExecutorFactory(executorFactory). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonSNS getClientSNS() { + return AmazonSNSClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSNS()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonSNSAsync getClientSNSAsync() { + return AmazonSNSAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSNS()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonSNSAsync getClientSNSAsync(final ExecutorFactory executorFactory) { + return AmazonSNSAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSNS()). + withExecutorFactory(executorFactory). + withCredentials(getCredentialsProvider()).build(); + } + + public static AWSLambda getClientLambda() { + return AWSLambdaClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationLambda()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AWSLambdaAsync getClientLambdaAsync() { + return AWSLambdaAsyncClientBuilder.standard(). 
+ withEndpointConfiguration(getEndpointConfigurationLambda()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AWSLambdaAsync getClientLambdaAsync(final ExecutorFactory executorFactory) { + return AWSLambdaAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationLambda()). + withExecutorFactory(executorFactory). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonS3 getClientS3() { + AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationS3()). + withCredentials(getCredentialsProvider()); + builder.setPathStyleAccessEnabled(true); + return builder.build(); + } + + public static AWSSecretsManager getClientSecretsManager() { + return AWSSecretsManagerClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSecretsManager()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonDynamoDB getClientDynamoDB() { + return AmazonDynamoDBClientBuilder.standard() + .withEndpointConfiguration(getEndpointConfigurationDynamoDB()) + .withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonDynamoDBStreams getClientDynamoDBStreams() { + return AmazonDynamoDBStreamsClientBuilder.standard() + .withEndpointConfiguration(getEndpointConfigurationDynamoDBStreams()) + .withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonKinesis getClientKinesis() { + return AmazonKinesisClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationKinesis()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonKinesisAsync getClientKinesisAsync() { + return AmazonKinesisAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationKinesis()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonKinesisAsync getClientKinesisAsync(final ExecutorFactory executorFactory) { + return AmazonKinesisAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationKinesis()). + withExecutorFactory(executorFactory). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonCloudWatch getClientCloudWatch() { + return AmazonCloudWatchClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationCloudWatch()). 
+ withCredentials(getCredentialsProvider()).build(); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationLambda() { + return getEndpointConfiguration(Localstack.INSTANCE.getEndpointLambda()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationKinesis() { + return getEndpointConfiguration(Localstack.INSTANCE.getEndpointKinesis()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationDynamoDB() { + return getEndpointConfiguration(Localstack.INSTANCE.getEndpointDynamoDB()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationDynamoDBStreams() { + return getEndpointConfiguration(Localstack.INSTANCE.getEndpointDynamoDBStreams()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationSQS() { + return getEndpointConfiguration(Localstack.INSTANCE.getEndpointSQS()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationS3() { + return getEndpointConfiguration(Localstack.INSTANCE.getEndpointS3()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationSNS() { + return getEndpointConfiguration(Localstack.INSTANCE.getEndpointSNS()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationCloudWatch() { + return getEndpointConfiguration(Localstack.INSTANCE.getEndpointCloudWatch()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationSecretsManager() { + return getEndpointConfiguration(Localstack.INSTANCE.getEndpointSecretsmanager()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationStepFunctions() { + return getEndpointConfiguration(Localstack.INSTANCE.getEndpointStepFunctions()); + } + + protected static void setEnv(Map newEnv) { + try { + Class processEnvironmentClass = Class.forName("java.lang.ProcessEnvironment"); + Field theEnvironmentField = processEnvironmentClass.getDeclaredField("theEnvironment"); + theEnvironmentField.setAccessible(true); + Map env = (Map) theEnvironmentField.get(null); + env.putAll(newEnv); + Field theCaseInsensitiveEnvironmentField = processEnvironmentClass + .getDeclaredField("theCaseInsensitiveEnvironment"); + theCaseInsensitiveEnvironmentField.setAccessible(true); + Map cienv = (Map) theCaseInsensitiveEnvironmentField.get(null); + cienv.putAll(newEnv); + } catch (NoSuchFieldException e) { + try { + Class[] classes = Collections.class.getDeclaredClasses(); + Map env = System.getenv(); + for (Class cl : classes) { + if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) { + Field field = cl.getDeclaredField("m"); + field.setAccessible(true); + Object obj = field.get(env); + Map map = (Map) obj; + map.clear(); + map.putAll(newEnv); + } + } + } catch (Exception e2) { + e2.printStackTrace(); + } + } catch (Exception e1) { + e1.printStackTrace(); + } + } + + public static void disableSslCertChecking() { + System.setProperty("com.amazonaws.sdk.disableCertChecking", "true"); + } + + public static void copyFolder(Path src, Path dest) throws IOException { + try(Stream stream = Files.walk(src)) { + stream.forEach(source -> { + boolean isExcluded = Arrays.stream(EXCLUDED_DIRECTORIES) + .anyMatch( excluded -> source.toAbsolutePath().toString().contains(excluded)); + if (!isExcluded) { + copy(source, dest.resolve(src.relativize(source))); + } + }); + } + } + + public static void copy(Path source, Path dest) { + try { + 
CopyOption[] options = new CopyOption[] {StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.REPLACE_EXISTING}; + if(Files.isDirectory(dest)) { + // continue without copying + return; + } + if (Files.exists(dest)) { + try(FileChannel sourceFile = FileChannel.open(source)) { + try (FileChannel destFile = FileChannel.open(dest)) { + if (!Files.getLastModifiedTime(source).equals(Files.getLastModifiedTime(dest)) + || sourceFile.size() != destFile.size() + ) { + Files.copy(source, dest, options); + } + } + } + } else { + Files.copy(source, dest, options); + } + } catch (Exception e) { + throw new RuntimeException(e.getMessage(), e); + } + } + + public static AWSCredentialsProvider getCredentialsProvider() { + return new AWSStaticCredentialsProvider(TEST_CREDENTIALS); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfiguration(String endpointURL) { + return new AwsClientBuilder.EndpointConfiguration(endpointURL, DEFAULT_REGION); + } + +} diff --git a/src/main/java/cloud/localstack/deprecated/Localstack.java b/src/main/java/cloud/localstack/deprecated/Localstack.java new file mode 100644 index 0000000..2ccb96f --- /dev/null +++ b/src/main/java/cloud/localstack/deprecated/Localstack.java @@ -0,0 +1,478 @@ +package cloud.localstack.deprecated; + +import com.amazonaws.util.IOUtils; +import org.ow2.proactive.process_tree_killer.ProcessTree; + +import java.io.*; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicReference; +import java.util.logging.Logger; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import cloud.localstack.Constants; +import cloud.localstack.ServiceName; + +/** + * Singleton class that automatically downloads, installs, starts, + * and stops the LocalStack local cloud infrastructure components. + * + * Should work cross-OS, however has been only tested under Unix (Linux/MacOS). + * + * @author Waldemar Hummer + * @author Patrick Allain - 5/3/18. + */ +@Deprecated +public class Localstack { + + /** Single instance... 
*/ + protected static final Localstack INSTANCE = new Localstack(); + + private static final Logger LOG = Logger.getLogger(Localstack.class.getName()); + + private static final AtomicReference INFRA_STARTED = new AtomicReference(); + + private static final String INFRA_READY_MARKER = "Ready."; + + private static final String TMP_INSTALL_DIR = System.getProperty("java.io.tmpdir") + + File.separator + "localstack_install_dir"; + + private static final String CURRENT_DEV_DIR; + + private static boolean DEV_ENVIRONMENT; + + private static final String ADDITIONAL_PATH = "/usr/local/bin/"; + + private static final String LOCALSTACK_REPO_URL = "https://github.com/localstack/localstack"; + + public static final String ENV_CONFIG_USE_SSL = "USE_SSL"; + + public static final String ENV_DEBUG = "DEBUG"; + + public static final String ENV_LOCALSTACK_HOSTNAME = "LOCALSTACK_HOSTNAME"; + + private static final String ENV_LOCALSTACK_PROCESS_GROUP = "ENV_LOCALSTACK_PROCESS_GROUP"; + + static { + // Determine if we are running in a development environment for localstack + Path currentDirectory = Paths.get(".").toAbsolutePath().getParent(); + Path localstackDir = Optional.ofNullable(currentDirectory) + .map(Path::getParent).map(Path::getParent).map(Path::getParent).orElse(null); + if( currentDirectory != null && localstackDir != null + && currentDirectory.getFileName().toString().equals("java") + && localstackDir.getFileName().toString().equals("localstack")) { + CURRENT_DEV_DIR = localstackDir.toString(); + Path gitConfig = Paths.get(CURRENT_DEV_DIR, ".git", "config"); + + if(Files.exists(gitConfig)) { + setIsDevEnvironment(gitConfig); + } else { + DEV_ENVIRONMENT = false; + } + } else { + CURRENT_DEV_DIR = currentDirectory.toAbsolutePath().toString(); + DEV_ENVIRONMENT = false; + } + } + + private Localstack() { } + + public static boolean isDevEnvironment() { + return DEV_ENVIRONMENT; + } + + private static void setIsDevEnvironment(Path gitConfig) { + Pattern remoteOrigin = Pattern.compile("^\\[remote \"origin\"]"); + Pattern localstackGit = Pattern.compile(".+\\/localstack\\.git$"); + boolean remoteOriginFound = false; + try { + try(Stream lines = Files.lines(gitConfig)){ + for(String line : lines.collect(Collectors.toList())) { + if(remoteOriginFound) { + if(localstackGit.matcher(line).matches()) { + DEV_ENVIRONMENT = true; + } else { + DEV_ENVIRONMENT = false; + } + break; + } + + if( remoteOrigin.matcher(line).matches() ) { + remoteOriginFound = true; + } + } + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /* SERVICE ENDPOINTS */ + + public static String getEndpointS3() { + return getEndpointS3(false); + } + + public static String getEndpointS3(boolean overrideSSL) { + String s3Endpoint = ensureInstallationAndGetEndpoint(ServiceName.S3, overrideSSL); + /* + * Use the domain name wildcard *.localhost.atlassian.io which maps to 127.0.0.1 + * We need to do this because S3 SDKs attempt to access a domain . 
+ * which by default would result in .localhost, but that name cannot be resolved + * (unless hardcoded in /etc/hosts) + */ + s3Endpoint = s3Endpoint.replace("localhost", "test.localhost.atlassian.io"); + return s3Endpoint; + } + + public static String getEndpointKinesis() { + return ensureInstallationAndGetEndpoint(ServiceName.KINESIS); + } + + public static String getEndpointLambda() { + return ensureInstallationAndGetEndpoint(ServiceName.LAMBDA); + } + + public static String getEndpointDynamoDB() { + return ensureInstallationAndGetEndpoint(ServiceName.DYNAMO); + } + + public static String getEndpointDynamoDBStreams() { + return ensureInstallationAndGetEndpoint(ServiceName.DYNAMO_STREAMS); + } + + public static String getEndpointAPIGateway() { + return ensureInstallationAndGetEndpoint(ServiceName.API_GATEWAY); + } + + public static String getEndpointElasticsearch() { + return ensureInstallationAndGetEndpoint(ServiceName.ELASTICSEARCH); + } + + public static String getEndpointElasticsearchService() { + return ensureInstallationAndGetEndpoint(ServiceName.ELASTICSEARCH_SERVICE); + } + + public static String getEndpointFirehose() { + return ensureInstallationAndGetEndpoint(ServiceName.FIREHOSE); + } + + public static String getEndpointSNS() { + return ensureInstallationAndGetEndpoint(ServiceName.SNS); + } + + public static String getEndpointSQS() { + return ensureInstallationAndGetEndpoint(ServiceName.SQS); + } + + public static String getEndpointRedshift() { + return ensureInstallationAndGetEndpoint(ServiceName.REDSHIFT); + } + + public static String getEndpointSES() { + return ensureInstallationAndGetEndpoint(ServiceName.SES); + } + + public static String getEndpointRoute53() { + return ensureInstallationAndGetEndpoint(ServiceName.ROUTE53); + } + + public static String getEndpointCloudFormation() { + return ensureInstallationAndGetEndpoint(ServiceName.CLOUDFORMATION); + } + + public static String getEndpointCloudWatch() { + return ensureInstallationAndGetEndpoint(ServiceName.CLOUDWATCH); + } + + public static String getEndpointSSM() { + return ensureInstallationAndGetEndpoint(ServiceName.SSM); + } + + public static String getEndpointSecretsmanager() { + return ensureInstallationAndGetEndpoint(ServiceName.SECRETSMANAGER); + } + + public static String getEndpointStepFunctions() { + return ensureInstallationAndGetEndpoint(ServiceName.STEPFUNCTIONS); + } + + public static String getEndpointEC2() { + return ensureInstallationAndGetEndpoint(ServiceName.EC2); + } + + + /* UTILITY METHODS */ + + /** + * Installs localstack into a temporary directory + * If DEV_ENVIRONMENT for localstack is detected also copies over any changed files + */ + private static void ensureInstallation() { + ensureInstallation(false); + } + + private static void ensureInstallation(boolean initialSetup) { + File dir = new File(TMP_INSTALL_DIR); + File constantsFile = new File(dir, "localstack/constants.py"); + String logMsg = "Installing LocalStack to temporary directory (this may take a while): " + TMP_INSTALL_DIR; + boolean messagePrinted = false; + if (!constantsFile.exists()) { + LOG.info(logMsg); + messagePrinted = true; + deleteDirectory(dir); + exec("git clone " + LOCALSTACK_REPO_URL + " " + TMP_INSTALL_DIR); + } + + if(DEV_ENVIRONMENT && initialSetup) { + // Copy changed files over + Path localstackDir = Paths.get(CURRENT_DEV_DIR); + Path tempLocalstackDir = Paths.get(TMP_INSTALL_DIR); + try { + TestUtils.copyFolder(localstackDir, tempLocalstackDir); + } catch (IOException e) { + throw new 
RuntimeException(e.getMessage(), e); + } + + ensureJavaFilesRefreshedForDev(); + } + + File installationDoneMarker = new File(dir, "localstack/infra/installation.finished.marker"); + if ( (DEV_ENVIRONMENT && initialSetup) || !installationDoneMarker.exists()) { + if (!messagePrinted) { + LOG.info(logMsg); + } + String useSSL = useSSL() ? "USE_SSL=1" : ""; + exec("cd \"" + TMP_INSTALL_DIR + "\";"+useSSL+" make install"); + /* create marker file */ + try { + installationDoneMarker.createNewFile(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + private static void deleteDirectory(File dir) { + try { + if (dir.exists()) + Files.walk(dir.toPath()) + .sorted(Comparator.reverseOrder()) + .map(Path::toFile) + .forEach(File::delete); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private static void killProcess(Process p) { + try { + ProcessTree.get().killAll(Collections.singletonMap( + ENV_LOCALSTACK_PROCESS_GROUP, ENV_LOCALSTACK_PROCESS_GROUP)); + } catch (Exception e) { + LOG.warning("Unable to terminate processes: " + e); + } + } + + private static String ensureInstallationAndGetEndpoint(String service) { + return ensureInstallationAndGetEndpoint(service,false); + } + + private static String ensureInstallationAndGetEndpoint(String service, boolean overrideSSL) { + try { + ensureInstallation(); + return getEndpoint(service, overrideSSL); + } catch (Exception e) { + return getDefaultEndpoint(service); + } + } + + public static boolean useSSL() { + return isEnvConfigSet(ENV_CONFIG_USE_SSL); + } + + public static boolean isDebug() { + return isEnvConfigSet(ENV_DEBUG); + } + + public static boolean isEnvConfigSet(String configName) { + String value = System.getenv(configName); + return value != null && !Arrays.asList("false", "0", "").contains(value.trim()); + } + + private static String getEndpoint(String service, boolean overrideSSL) { + try { + return getEndpointFromSource(service, overrideSSL); + } catch (Exception e) { + return getDefaultEndpoint(service); + } + } + + private static String getDefaultEndpoint(String service) { + int port = Constants.DEFAULT_PORTS.get(service); + String protocol = useSSL() ? "https" : "http"; + String localstackHostname = System.getenv(ENV_LOCALSTACK_HOSTNAME); + localstackHostname = localstackHostname == null ? "localhost" : localstackHostname; + return String.format("%s://%s:%s", protocol, localstackHostname, port); + } + + /** + * Gets the endpoint for the service, uses SSL if overrideSSL or environmental config USE_SSL is set + */ + private static String getEndpointFromSource(String service, boolean overrideSSL) { + String useSSL = overrideSSL || useSSL() ? "USE_SSL=1" : ""; + String cmd = "cd '" + TMP_INSTALL_DIR + "'; " + + ". .venv/bin/activate; " + + useSSL + " python -c 'import localstack_client.config; " + + "print(localstack_client.config.get_service_endpoint(\"" + service + "\"))'"; + Process p = exec(cmd); + try { + return IOUtils.toString(p.getInputStream()).trim(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private static Process exec(String... cmd) { + return exec(true, cmd); + } + + private static Process exec(boolean wait, String... 
cmd) { + try { + if (cmd.length == 1 && !new File(cmd[0]).exists()) { + cmd = new String[]{"bash", "-c", cmd[0]}; + } + Map env = new HashMap<>(System.getenv()); + ProcessBuilder builder = new ProcessBuilder(cmd); + builder.environment().put("PATH", ADDITIONAL_PATH + ":" + env.get("PATH")); + builder.environment().put(ENV_LOCALSTACK_PROCESS_GROUP, ENV_LOCALSTACK_PROCESS_GROUP); + final Process p = builder.start(); + if (wait) { + int code = p.waitFor(); + if (code != 0) { + String stderr = IOUtils.toString(p.getErrorStream()); + String stdout = IOUtils.toString(p.getInputStream()); + throw new IllegalStateException("Failed to run command '" + String.join(" ", cmd) + "', return " + + "code " + code + + ".\nSTDOUT: " + stdout + "\nSTDERR: " + stderr); + } + } else { + /* make sure we destroy the process on JVM shutdown */ + Runtime.getRuntime().addShutdownHook(new Thread() { + public void run() { + killProcess(p); + } + }); + } + return p; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void setupInfrastructure() { + synchronized (INFRA_STARTED) { + // make sure everything is installed locally + ensureInstallation(true); + + // make sure we avoid any errors related to locally generated SSL certificates + TestUtils.disableSslCertChecking(); + + if (INFRA_STARTED.get() != null) return; + String[] cmd = new String[]{"make", "-C", TMP_INSTALL_DIR, "infra"}; + Process proc; + try { + proc = exec(false, cmd); + final BufferedReader r1 = new BufferedReader(new InputStreamReader(proc.getInputStream())); + final BufferedReader r2 = new BufferedReader(new InputStreamReader(proc.getErrorStream())); + String line; + LOG.info(TMP_INSTALL_DIR); + LOG.info("Waiting for infrastructure to be spun up"); + boolean ready = false; + boolean debug = isDebug(); + String output = ""; + while ((line = r1.readLine()) != null) { + output += line + "\n"; + if (debug) { + System.out.println("LocalStack: " + line); + } + if (INFRA_READY_MARKER.equals(line)) { + ready = true; + break; + } + } + if (debug) { + dumpStream(r1, System.out); + dumpStream(r2, System.err); + } + if (!ready) { + throw new RuntimeException("Unable to start local infrastructure. Debug output: " + output); + } + INFRA_STARTED.set(proc); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + private void dumpStream(BufferedReader r, PrintStream out) { + new Thread(new Runnable() { + public void run() { + String line; + try { + while ((line = r.readLine()) != null) { + out.println("LocalStack: " + line); + } + } catch (Exception e) { + e.printStackTrace(); + } + } + }).start(); + } + + /** + * Compiles the localstack-utils-fat and localstack-utils-tests jars and copies them to the localstack tmp install + * directory. 
+ */ + private static void ensureJavaFilesRefreshedForDev() { + String[] cmdPrepareJava = new String[]{"make", "-C", CURRENT_DEV_DIR + , "prepare-java-tests-infra-jars"}; + exec(true, cmdPrepareJava); + Path currentInfraPath = Paths.get(CURRENT_DEV_DIR, "localstack", "infra"); + Path tmpInfraPath = Paths.get(TMP_INSTALL_DIR, "localstack", "infra"); + Path localstackUtilsFatJar = Paths.get(currentInfraPath.toString(), "localstack-utils-fat.jar"); + Path localstackUtilsTestsJar = Paths.get(currentInfraPath.toString(), "localstack-utils-tests.jar"); + + if(Files.exists(localstackUtilsFatJar)) { + Path tempInstallDirFatJar = Paths.get(tmpInfraPath.toString(), "localstack-utils-fat.jar"); + TestUtils.copy(localstackUtilsFatJar, tempInstallDirFatJar); + } + + if(Files.exists(localstackUtilsTestsJar)) { + Path tempInstallDirTestsJar = Paths.get(tmpInfraPath.toString(), "localstack-utils-tests.jar"); + TestUtils.copy(localstackUtilsTestsJar, tempInstallDirTestsJar); + } + } + + public static void teardownInfrastructure() { + Process proc = INFRA_STARTED.get(); + if (proc == null) { + return; + } + killProcess(proc); + INFRA_STARTED.set(null); + } + + public static String getDefaultRegion() { + return TestUtils.DEFAULT_REGION; + } +} diff --git a/src/main/java/cloud/localstack/deprecated/LocalstackExtension.java b/src/main/java/cloud/localstack/deprecated/LocalstackExtension.java new file mode 100644 index 0000000..b1f902b --- /dev/null +++ b/src/main/java/cloud/localstack/deprecated/LocalstackExtension.java @@ -0,0 +1,18 @@ +package cloud.localstack.deprecated; + +import org.junit.jupiter.api.extension.BeforeTestExecutionCallback; +import org.junit.jupiter.api.extension.ExtensionContext; + +/** + * Simple JUnit extension for JUnit 5. + * + * @author Patrick Allain + */ +@Deprecated +public class LocalstackExtension implements BeforeTestExecutionCallback { + + @Override + public void beforeTestExecution(final ExtensionContext context) { + Localstack.INSTANCE.setupInfrastructure(); + } +} diff --git a/src/main/java/cloud/localstack/deprecated/LocalstackOutsideDockerTestRunner.java b/src/main/java/cloud/localstack/deprecated/LocalstackOutsideDockerTestRunner.java new file mode 100644 index 0000000..acf55fe --- /dev/null +++ b/src/main/java/cloud/localstack/deprecated/LocalstackOutsideDockerTestRunner.java @@ -0,0 +1,34 @@ +package cloud.localstack.deprecated; + +import org.junit.runner.notification.RunNotifier; +import org.junit.runners.BlockJUnit4ClassRunner; +import org.junit.runners.model.InitializationError; + +/** + * Simple JUnit test runner that automatically downloads, installs, starts, + * and stops the LocalStack local cloud infrastructure components. + * + * Should work cross-OS, however has been only tested under Unix (Linux/MacOS). + * + * Update 2019-12-07: This test runner has now been deprecated. The main reason is that + * it attempts to install various dependencies on the local machine, which frequently + * causes issues for users running in different OSs or environments. Please use the + * Docker-based test running instead, which is now the default. 
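For completeness, the deprecated `LocalstackExtension` shown a few hunks above is the JUnit 5 counterpart of this deprecated runner: it installs and starts LocalStack natively (no Docker) via `setupInfrastructure()` before each test execution. A hedged sketch of how it would be attached; the test class and its contents are illustrative only:

```java
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;

// Sketch only: JUnit 5 usage of the deprecated, non-Docker extension.
@ExtendWith(cloud.localstack.deprecated.LocalstackExtension.class)
class DeprecatedExtensionSmokeTest {

    @Test
    void infrastructureIsUp() {
        // The deprecated TestUtils variant resolves endpoints from the local installation.
        cloud.localstack.deprecated.TestUtils.getClientSQS().listQueues();
    }
}
```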
+ * + * @author Waldemar Hummer + * @author Patrick Allain + */ +@Deprecated +public class LocalstackOutsideDockerTestRunner extends BlockJUnit4ClassRunner { + + public LocalstackOutsideDockerTestRunner(Class klass) throws InitializationError { + super(klass); + } + + @Override + public void run(RunNotifier notifier) { + Localstack.INSTANCE.setupInfrastructure(); + super.run(notifier); + } + +} diff --git a/src/main/java/cloud/localstack/deprecated/TestUtils.java b/src/main/java/cloud/localstack/deprecated/TestUtils.java new file mode 100644 index 0000000..3139763 --- /dev/null +++ b/src/main/java/cloud/localstack/deprecated/TestUtils.java @@ -0,0 +1,275 @@ +package cloud.localstack.deprecated; + +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.client.builder.ExecutorFactory; +import com.amazonaws.services.kinesis.AmazonKinesis; +import com.amazonaws.services.kinesis.AmazonKinesisAsync; +import com.amazonaws.services.kinesis.AmazonKinesisAsyncClientBuilder; +import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder; +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.AWSLambdaAsync; +import com.amazonaws.services.lambda.AWSLambdaAsyncClientBuilder; +import com.amazonaws.services.lambda.AWSLambdaClientBuilder; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.sns.AmazonSNS; +import com.amazonaws.services.sns.AmazonSNSAsync; +import com.amazonaws.services.sns.AmazonSNSAsyncClientBuilder; +import com.amazonaws.services.sns.AmazonSNSClientBuilder; +import com.amazonaws.services.sqs.*; +import com.amazonaws.services.secretsmanager.AWSSecretsManager; +import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.AmazonSQSClientBuilder; +import java.io.IOException; +import java.lang.reflect.Field; +import java.nio.channels.FileChannel; +import java.nio.file.CopyOption; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Stream; + +import static cloud.localstack.TestUtils.getCredentialsProvider; + +@Deprecated +@SuppressWarnings("all") +public class TestUtils { + + public static final String DEFAULT_REGION = "us-east-1"; + public static final String TEST_ACCESS_KEY = "test"; + public static final String TEST_SECRET_KEY = "test"; + public static final AWSCredentials TEST_CREDENTIALS = new BasicAWSCredentials(TEST_ACCESS_KEY, TEST_SECRET_KEY); + + private static final String[] EXCLUDED_DIRECTORIES = { + ".github", ".git", ".idea", ".venv", "target", "node_modules" + }; + + public static void setEnv(String key, String value) { + Map newEnv = new HashMap(System.getenv()); + newEnv.put(key, value); + setEnv(newEnv); + } + + public static AmazonSQS getClientSQS() { + return AmazonSQSClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSQS()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonSQSAsync getClientSQSAsync() { + return AmazonSQSAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSQS()). 
+ withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonSQSAsync getClientSQSAsync(final ExecutorFactory executorFactory) { + return AmazonSQSAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSQS()). + withExecutorFactory(executorFactory). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonSNS getClientSNS() { + return AmazonSNSClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSNS()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonSNSAsync getClientSNSAsync() { + return AmazonSNSAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSNS()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonSNSAsync getClientSNSAsync(final ExecutorFactory executorFactory) { + return AmazonSNSAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSNS()). + withExecutorFactory(executorFactory). + withCredentials(getCredentialsProvider()).build(); + } + + public static AWSLambda getClientLambda() { + return AWSLambdaClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationLambda()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AWSLambdaAsync getClientLambdaAsync() { + return AWSLambdaAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationLambda()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AWSLambdaAsync getClientLambdaAsync(final ExecutorFactory executorFactory) { + return AWSLambdaAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationLambda()). + withExecutorFactory(executorFactory). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonS3 getClientS3() { + AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationS3()). + withCredentials(getCredentialsProvider()); + builder.setPathStyleAccessEnabled(true); + return builder.build(); + } + + public static AmazonS3 getClientS3SSL() { + AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationS3SSL()). + withCredentials(getCredentialsProvider()); + builder.setPathStyleAccessEnabled(true); + return builder.build(); + } + + public static AWSSecretsManager getClientSecretsManager() { + return AWSSecretsManagerClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationSecretsManager()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonKinesis getClientKinesis() { + return AmazonKinesisClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationKinesis()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonKinesisAsync getClientKinesisAsync() { + return AmazonKinesisAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationKinesis()). + withCredentials(getCredentialsProvider()).build(); + } + + public static AmazonKinesisAsync getClientKinesisAsync(final ExecutorFactory executorFactory) { + return AmazonKinesisAsyncClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationKinesis()). + withExecutorFactory(executorFactory). 
+ withCredentials(getCredentialsProvider()).build(); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationLambda() { + return getEndpointConfiguration(Localstack.getEndpointLambda()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationKinesis() { + return getEndpointConfiguration(Localstack.getEndpointKinesis()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationSQS() { + return getEndpointConfiguration(Localstack.getEndpointSQS()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationS3() { + return getEndpointConfiguration(Localstack.getEndpointS3()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationSNS() { + return getEndpointConfiguration(Localstack.getEndpointSNS()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationS3SSL() { + return getEndpointConfiguration(Localstack.getEndpointS3(true)); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationSecretsManager() { + return getEndpointConfiguration(Localstack.getEndpointSecretsmanager()); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationStepFunctions() { + return getEndpointConfiguration(Localstack.getEndpointStepFunctions()); + } + + protected static void setEnv(Map newEnv) { + try { + Class processEnvironmentClass = Class.forName("java.lang.ProcessEnvironment"); + Field theEnvironmentField = processEnvironmentClass.getDeclaredField("theEnvironment"); + theEnvironmentField.setAccessible(true); + Map env = (Map) theEnvironmentField.get(null); + env.putAll(newEnv); + Field theCaseInsensitiveEnvironmentField = processEnvironmentClass + .getDeclaredField("theCaseInsensitiveEnvironment"); + theCaseInsensitiveEnvironmentField.setAccessible(true); + Map cienv = (Map) theCaseInsensitiveEnvironmentField.get(null); + cienv.putAll(newEnv); + } catch (NoSuchFieldException e) { + try { + Class[] classes = Collections.class.getDeclaredClasses(); + Map env = System.getenv(); + for (Class cl : classes) { + if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) { + Field field = cl.getDeclaredField("m"); + field.setAccessible(true); + Object obj = field.get(env); + Map map = (Map) obj; + map.clear(); + map.putAll(newEnv); + } + } + } catch (Exception e2) { + e2.printStackTrace(); + } + } catch (Exception e1) { + e1.printStackTrace(); + } + } + + public static void disableSslCertChecking() { + System.setProperty("com.amazonaws.sdk.disableCertChecking", "true"); + } + + public static void copyFolder(Path src, Path dest) throws IOException { + try(Stream stream = Files.walk(src)) { + stream.forEach(source -> { + boolean isExcluded = Arrays.stream(EXCLUDED_DIRECTORIES) + .anyMatch( excluded -> source.toAbsolutePath().toString().contains(excluded)); + if (!isExcluded) { + copy(source, dest.resolve(src.relativize(source))); + } + }); + } + } + + public static void copy(Path source, Path dest) { + try { + CopyOption[] options = new CopyOption[] {StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.REPLACE_EXISTING}; + if(Files.isDirectory(dest)) { + // continue without copying + return; + } + if (Files.exists(dest)) { + try(FileChannel sourceFile = FileChannel.open(source)) { + try (FileChannel destFile = FileChannel.open(dest)) { + if (!Files.getLastModifiedTime(source).equals(Files.getLastModifiedTime(dest)) + || sourceFile.size() != destFile.size() + ) { 
+ Files.copy(source, dest, options); + } + } + } + } else { + Files.copy(source, dest, options); + } + } catch (Exception e) { + throw new RuntimeException(e.getMessage(), e); + } + } + + public static AWSCredentialsProvider getCredentialsProvider() { + return new AWSStaticCredentialsProvider(TEST_CREDENTIALS); + } + + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfiguration(String endpointURL) { + return new AwsClientBuilder.EndpointConfiguration(endpointURL, DEFAULT_REGION); + } + +} diff --git a/src/main/java/cloud/localstack/docker/Container.java b/src/main/java/cloud/localstack/docker/Container.java new file mode 100644 index 0000000..cfbc074 --- /dev/null +++ b/src/main/java/cloud/localstack/docker/Container.java @@ -0,0 +1,202 @@ +package cloud.localstack.docker; + +import cloud.localstack.Localstack; +import cloud.localstack.docker.command.*; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.logging.Logger; +import java.util.regex.Pattern; + +/** + * An abstraction of the LocalStack docker container. Provides port mappings, a way + * to poll the logs until a specified token appears, and the ability to stop the container. + */ +public class Container { + + private static final Logger LOG = Logger.getLogger(Container.class.getName()); + + private static final String LOCALSTACK_NAME = "localstack/localstack"; + private static final String LOCALSTACK_PORTS = "4567-4584"; + + private static final int MAX_PORT_CONNECTION_ATTEMPTS = 10; + + private static final int MAX_LOG_COLLECTION_ATTEMPTS = 120; + private static final long POLL_INTERVAL = 1000; + private static final int NUM_LOG_LINES = 10; + + private static final String ENV_DEBUG = "DEBUG"; + private static final String ENV_USE_SSL = "USE_SSL"; + private static final String ENV_DEBUG_DEFAULT = "1"; + public static final String LOCALSTACK_EXTERNAL_HOSTNAME = "HOSTNAME_EXTERNAL"; + + private static final String DEFAULT_CONTAINER_ID = "localstack_main"; + + private final String containerId; + private final List ports; + + private boolean startedByUs; + + /** + * It creates a container using the hostname given and the set of environment variables provided + * @param externalHostName hostname to be used by localstack + * @param pullNewImage determines if docker pull should be run to update to the latest image of the container + * @param randomizePorts determines if the container should expose the default local stack ports or if it should expose randomized ports + * in order to prevent conflicts with other localstack containers running on the same machine + * @param environmentVariables map of environment variables to be passed to the docker container + */ + public static Container createLocalstackContainer( + String externalHostName, boolean pullNewImage, boolean randomizePorts, String imageTag, + Map environmentVariables, Map portMappings) { + + environmentVariables = environmentVariables == null ? Collections.emptyMap() : environmentVariables; + portMappings = portMappings == null ? Collections.emptyMap() : portMappings; + + String fullImageName = LOCALSTACK_NAME + ":" + (imageTag == null ? 
"latest" : imageTag); + boolean imageExists = new ListImagesCommand().execute().contains(fullImageName); + + if(pullNewImage || !imageExists) { + LOG.info("Pulling latest image..."); + new PullCommand(LOCALSTACK_NAME, imageTag).execute(); + } + + RunCommand runCommand = new RunCommand(LOCALSTACK_NAME, imageTag) + .withExposedPorts(LOCALSTACK_PORTS, randomizePorts) + .withEnvironmentVariable(LOCALSTACK_EXTERNAL_HOSTNAME, externalHostName) + .withEnvironmentVariable(ENV_DEBUG, ENV_DEBUG_DEFAULT) + .withEnvironmentVariable(ENV_USE_SSL, Localstack.INSTANCE.useSSL() ? "1" : "0") + .withEnvironmentVariables(environmentVariables); + for (Integer port : portMappings.keySet()) { + runCommand = runCommand.withExposedPorts("" + port, false); + } + String containerId = runCommand.execute(); + LOG.info("Started container: " + containerId); + + Container result = getRunningLocalstackContainer(containerId); + result.startedByUs = true; + return result; + } + + + public static Container getRunningLocalstackContainer() { + return getRunningLocalstackContainer(DEFAULT_CONTAINER_ID); + } + + public static Container getRunningLocalstackContainer(String containerId) { + List portMappingsList = new PortCommand(containerId).execute(); + return new Container(containerId, portMappingsList); + } + + + private Container(String containerId, List ports) { + this.containerId = containerId; + this.ports = Collections.unmodifiableList(ports); + } + + + /** + * Given an internal port, retrieve the publicly addressable port that maps to it + */ + public int getExternalPortFor(int internalPort) { + return ports.stream() + .filter(port -> port.getInternalPort() == internalPort) + .map(PortMapping::getExternalPort) + .findFirst() + .orElseThrow(() -> new IllegalArgumentException("Port: " + internalPort + " does not exist")); + } + + + public void waitForAllPorts(String ip) { + ports.forEach(port -> waitForPort(ip, port)); + } + + + private void waitForPort(String ip, PortMapping port) { + int attempts = 0; + do { + if(isPortOpen(ip, port)) { + return; + } + attempts++; + } + while(attempts < MAX_PORT_CONNECTION_ATTEMPTS); + + throw new IllegalStateException("Could not open port:" + port.getExternalPort() + " on ip:" + port.getIp()); + } + + + private boolean isPortOpen(String ip, PortMapping port) { + try (Socket socket = new Socket()) { + socket.connect(new InetSocketAddress(ip, port.getExternalPort()), 1000); + return true; + } catch (IOException e) { + return false; + } + } + + + /** + * Poll the docker logs until a specific token appears, then return. Primarily + * used to look for the "Ready." token in the LocalStack logs. 
+ */ + public void waitForLogToken(Pattern pattern) { + int attempts = 0; + do { + if(logContainsPattern(pattern)) { + return; + } + waitForLogs(); + attempts++; + } + while(attempts < MAX_LOG_COLLECTION_ATTEMPTS); + + String logs = getContainerLogs(); + throw new IllegalStateException("Could not find token: " + pattern + " in Docker logs: " + logs); + } + + + private boolean logContainsPattern(Pattern pattern) { + String logs = getContainerLogs(); + return pattern.matcher(logs).find(); + } + + + private String getContainerLogs() { + return new LogCommand(containerId).withNumberOfLines(NUM_LOG_LINES).execute(); + } + + + private void waitForLogs(){ + try { + Thread.sleep(POLL_INTERVAL); + } + catch (InterruptedException ex) { + throw new RuntimeException(ex); + } + } + + + /** + * Stop the container + */ + public void stop() { + if (!startedByUs) { + return; + } + new StopCommand(containerId).execute(); + LOG.info("Stopped container: " + containerId); + } + + + /** + * Run a command on the container via docker exec + */ + public String executeCommand(List command) { + return new ExecCommand(containerId).execute(command); + } +} diff --git a/src/main/java/cloud/localstack/docker/DockerExe.java b/src/main/java/cloud/localstack/docker/DockerExe.java new file mode 100644 index 0000000..7f18750 --- /dev/null +++ b/src/main/java/cloud/localstack/docker/DockerExe.java @@ -0,0 +1,90 @@ +package cloud.localstack.docker; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.concurrent.Executors.newSingleThreadExecutor; +import static java.util.stream.Collectors.joining; + +import java.io.BufferedReader; +import java.io.File; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +/** + * A wrapper around the docker executable. The DOCKER_LOCATION environment variable + * can be used if docker is not installed in a default location. 
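+ *
+ * <p>A minimal sketch of how this wrapper is used (the DOCKER_LOCATION path is
+ * only an example for a non-standard install):
+ * <pre>{@code
+ * // e.g. export DOCKER_LOCATION=/opt/docker/bin/docker before running the tests
+ * String output = new DockerExe().execute(Arrays.asList("version"));
+ * }</pre>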
+ */ +public class DockerExe { + + private static final int DEFAULT_WAIT_TIME_MINUTES = 2; + + private static final List POSSIBLE_EXE_LOCATIONS = Arrays.asList( + System.getenv("DOCKER_LOCATION"), + "C:/program files/docker/docker/resources/bin/docker.exe", + "/usr/local/bin/docker", + "/usr/bin/docker"); + + private final String exeLocation; + + public DockerExe() { + this.exeLocation = getDockerExeLocation(); + } + + private String getDockerExeLocation() { + return POSSIBLE_EXE_LOCATIONS.stream() + .filter(Objects::nonNull) + .filter(name -> new File(name).exists()) + .findFirst() + .orElseThrow(() -> new IllegalStateException("Cannot find docker executable.")); + } + + public String execute(List args) { + return execute(args, DEFAULT_WAIT_TIME_MINUTES); + } + + public String execute(List args, int waitTimeoutMinutes) { + return execute(args, waitTimeoutMinutes, Arrays.asList()); + } + + public String execute(List args, int waitTimeoutMinutes, List errorCodes) { + try { + List command = new ArrayList<>(); + command.add(exeLocation); + command.addAll(args); + + Process process = new ProcessBuilder() + .command(command) + .redirectErrorStream(true) + .start(); + + ExecutorService exec = newSingleThreadExecutor(); + Future outputFuture = exec.submit(() -> handleOutput(process)); + + String output = outputFuture.get(waitTimeoutMinutes, TimeUnit.MINUTES); + process.waitFor(waitTimeoutMinutes, TimeUnit.MINUTES); + int code = process.exitValue(); + exec.shutdown(); + + if (errorCodes.contains(code)) { + throw new RuntimeException("Error status code " + code + " returned from process. Output: " + output); + } + + return output; + } catch (Exception ex) { + if (ex instanceof RuntimeException) { + throw (RuntimeException) ex; + } + throw new RuntimeException("Failed to execute command", ex); + } + } + + private String handleOutput(Process process) { + BufferedReader stdout = new BufferedReader(new InputStreamReader(process.getInputStream(), UTF_8)); + return stdout.lines().collect(joining(System.lineSeparator())); + } +} diff --git a/src/main/java/cloud/localstack/docker/LocalstackDockerExtension.java b/src/main/java/cloud/localstack/docker/LocalstackDockerExtension.java new file mode 100644 index 0000000..15231d5 --- /dev/null +++ b/src/main/java/cloud/localstack/docker/LocalstackDockerExtension.java @@ -0,0 +1,61 @@ +package cloud.localstack.docker; + +import cloud.localstack.Localstack; +import cloud.localstack.docker.annotation.LocalstackDockerAnnotationProcessor; +import cloud.localstack.docker.annotation.LocalstackDockerConfiguration; +import org.junit.jupiter.api.extension.BeforeAllCallback; +import org.junit.jupiter.api.extension.ExtensionContext; + +/** + * JUnit test runner that automatically pulls and runs the latest localstack docker image + * and then terminates when tests are complete. + * + * Having docker installed is a prerequisite for this test runner to execute. If docker + * is not installed in one of the default locations (C:\program files\docker\docker\resources\bin\, usr/local/bin or + * usr/bin) + * then use the DOCKER_LOCATION environment variable to specify the location. + * + * Since ports are dynamically allocated, the external port needs to be resolved based on the default localstack port. + * + * The hostname defaults to localhost, but in some environments that is not sufficient, so the HostName can be specified + * by using the LocalstackDockerProperties annotation with an IHostNameResolver. 
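+ *
+ * <p>A minimal JUnit 5 usage sketch (the selected services are only examples):
+ * <pre>{@code
+ * @ExtendWith(LocalstackDockerExtension.class)
+ * @LocalstackDockerProperties(services = {"sqs", "s3"})
+ * class MyLocalstackIT {
+ *     // create AWS clients pointed at the container and run assertions here
+ * }
+ * }</pre>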
+ * + * @author Alan Bevier + * @author Patrick Allain + * @author Omar Khammassi + */ +public class LocalstackDockerExtension implements BeforeAllCallback { + + private static final LocalstackDockerAnnotationProcessor PROCESSOR = new LocalstackDockerAnnotationProcessor(); + private static final ExtensionContext.Namespace NAMESPACE = ExtensionContext.Namespace.create(LocalstackDockerExtension.class); + + @Override + public void beforeAll(final ExtensionContext context) throws Exception { + final ExtensionContext.Store store; + if (isUseSingleDockerContainer(context)) { + store = context.getRoot().getStore(ExtensionContext.Namespace.GLOBAL); + } else { + store = context.getStore(NAMESPACE); + } + store.getOrComputeIfAbsent("localstack", key -> new LocalstackDockerExtension.StartedLocalStack(context)); + } + + private boolean isUseSingleDockerContainer(final ExtensionContext context) { + return PROCESSOR.process(context.getRequiredTestClass()).isUseSingleDockerContainer(); + } + + static class StartedLocalStack implements ExtensionContext.Store.CloseableResource { + + private Localstack localstackDocker = Localstack.INSTANCE; + + StartedLocalStack(ExtensionContext context) { + final LocalstackDockerConfiguration dockerConfig = PROCESSOR.process(context.getRequiredTestClass()); + localstackDocker.startup(dockerConfig); + } + + @Override + public void close() throws Throwable { + localstackDocker.stop(); + } + } +} diff --git a/src/main/java/cloud/localstack/docker/PortMapping.java b/src/main/java/cloud/localstack/docker/PortMapping.java new file mode 100644 index 0000000..8e32c9c --- /dev/null +++ b/src/main/java/cloud/localstack/docker/PortMapping.java @@ -0,0 +1,33 @@ +package cloud.localstack.docker; + +/** + * Keeps track of the external to internal port mapping for a container + */ +public class PortMapping { + private final String ip; + private final int externalPort; + private final int internalPort; + + public PortMapping(String ip, String externalPort, String internalPort) { + this.ip = ip; + this.externalPort = Integer.parseInt(externalPort); + this.internalPort = Integer.parseInt(internalPort); + } + + public String getIp() { + return ip; + } + + public int getExternalPort() { + return externalPort; + } + + public int getInternalPort() { + return internalPort; + } + + @Override + public String toString() { + return String.format("%s:%s -> %s", ip, externalPort, internalPort); + } +} diff --git a/src/main/java/cloud/localstack/docker/annotation/DefaultEnvironmentVariableProvider.java b/src/main/java/cloud/localstack/docker/annotation/DefaultEnvironmentVariableProvider.java new file mode 100644 index 0000000..f6210e2 --- /dev/null +++ b/src/main/java/cloud/localstack/docker/annotation/DefaultEnvironmentVariableProvider.java @@ -0,0 +1,13 @@ +package cloud.localstack.docker.annotation; + +import java.util.HashMap; +import java.util.Map; + +public class DefaultEnvironmentVariableProvider implements IEnvironmentVariableProvider { + + @Override + public Map getEnvironmentVariables() { + return new HashMap<>(); + } + +} diff --git a/src/main/java/cloud/localstack/docker/annotation/EC2HostNameResolver.java b/src/main/java/cloud/localstack/docker/annotation/EC2HostNameResolver.java new file mode 100644 index 0000000..7f3290a --- /dev/null +++ b/src/main/java/cloud/localstack/docker/annotation/EC2HostNameResolver.java @@ -0,0 +1,23 @@ +package cloud.localstack.docker.annotation; + +import com.amazonaws.util.EC2MetadataUtils; + +/** + * Finds the hostname of the current EC2 instance + * + * 
This is useful for a CI server that is itself a docker container and which mounts the docker unix socket + * from the host machine. In that case, the server cannot spawn child containers but will instead spawn sibling + * containers which cannot be addressed at "localhost". In order to address the sibling containers you need to resolve + * the hostname of the host machine, which this method will accomplish. + * + * For more information about running docker for CI and mounting the host socket please look here: + * http://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/ + */ +public class EC2HostNameResolver implements IHostNameResolver { + + @Override + public String getHostName() { + return EC2MetadataUtils.getLocalHostName(); + } + +} diff --git a/src/main/java/cloud/localstack/docker/annotation/IEnvironmentVariableProvider.java b/src/main/java/cloud/localstack/docker/annotation/IEnvironmentVariableProvider.java new file mode 100644 index 0000000..20c8e06 --- /dev/null +++ b/src/main/java/cloud/localstack/docker/annotation/IEnvironmentVariableProvider.java @@ -0,0 +1,8 @@ +package cloud.localstack.docker.annotation; + +import java.util.Map; + +public interface IEnvironmentVariableProvider { + + Map getEnvironmentVariables(); +} diff --git a/src/main/java/cloud/localstack/docker/annotation/IHostNameResolver.java b/src/main/java/cloud/localstack/docker/annotation/IHostNameResolver.java new file mode 100644 index 0000000..bdbbffd --- /dev/null +++ b/src/main/java/cloud/localstack/docker/annotation/IHostNameResolver.java @@ -0,0 +1,5 @@ +package cloud.localstack.docker.annotation; + +public interface IHostNameResolver { + String getHostName(); +} diff --git a/src/main/java/cloud/localstack/docker/annotation/LocalHostNameResolver.java b/src/main/java/cloud/localstack/docker/annotation/LocalHostNameResolver.java new file mode 100644 index 0000000..24c21e0 --- /dev/null +++ b/src/main/java/cloud/localstack/docker/annotation/LocalHostNameResolver.java @@ -0,0 +1,12 @@ +package cloud.localstack.docker.annotation; + +/** + * A default host name resolver + */ +public class LocalHostNameResolver implements IHostNameResolver { + + @Override + public String getHostName() { + return "localhost"; + } +} diff --git a/src/main/java/cloud/localstack/docker/annotation/LocalstackDockerAnnotationProcessor.java b/src/main/java/cloud/localstack/docker/annotation/LocalstackDockerAnnotationProcessor.java new file mode 100644 index 0000000..5b2c9cc --- /dev/null +++ b/src/main/java/cloud/localstack/docker/annotation/LocalstackDockerAnnotationProcessor.java @@ -0,0 +1,85 @@ +package cloud.localstack.docker.annotation; + +import org.apache.commons.lang3.StringUtils; + +import java.util.HashMap; +import java.util.Map; +import java.util.logging.Logger; +import java.util.stream.Stream; + +/** + * Processor to retrieve docker configuration based on {@link LocalstackDockerProperties} annotation. 
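+ *
+ * <p>For example (the annotated test class name is hypothetical):
+ * <pre>{@code
+ * LocalstackDockerConfiguration config =
+ *     new LocalstackDockerAnnotationProcessor().process(MyAnnotatedTest.class);
+ * }</pre>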
+ * + * @author Alan Bevier + * @author Patrick Allain + * @author Omar Khammassi + */ +public class LocalstackDockerAnnotationProcessor { + + private static final Logger LOG = Logger.getLogger(LocalstackDockerAnnotationProcessor.class.getName()); + + public LocalstackDockerConfiguration process(final Class klass) { + return Stream.of(klass.getAnnotations()) + .filter(annotation -> annotation instanceof LocalstackDockerProperties) + .map(i -> (LocalstackDockerProperties) i) + .map(this::processDockerPropertiesAnnotation) + .findFirst() + .orElse(LocalstackDockerConfiguration.DEFAULT); + } + + private LocalstackDockerConfiguration processDockerPropertiesAnnotation(LocalstackDockerProperties properties) { + return LocalstackDockerConfiguration.builder() + .environmentVariables(this.getEnvironments(properties)) + .externalHostName(this.getExternalHostName(properties)) + .portMappings(this.getCustomPortMappings(properties)) + .pullNewImage(properties.pullNewImage()) + .ignoreDockerRunErrors(properties.ignoreDockerRunErrors()) + .randomizePorts(properties.randomizePorts()) + .imageTag(StringUtils.isEmpty(properties.imageTag()) ? null : properties.imageTag()) + .useSingleDockerContainer(properties.useSingleDockerContainer()) + .build(); + } + + private Map getCustomPortMappings(final LocalstackDockerProperties properties) { + final Map portMappings = new HashMap<>(); + for (String service : properties.services()) { + String[] parts = service.split(":"); + if (parts.length > 1) { + int port = Integer.parseInt(parts[1]); + portMappings.put(port, port); + } + } + return portMappings; + } + + private Map getEnvironments(final LocalstackDockerProperties properties) { + final Map environmentVariables = new HashMap<>(); + try { + IEnvironmentVariableProvider environmentProvider = properties.environmentVariableProvider().newInstance(); + environmentVariables.putAll(environmentProvider.getEnvironmentVariables()); + } catch (InstantiationException | IllegalAccessException ex) { + throw new IllegalStateException("Unable to get environment variables", ex); + } + + final String services = String.join(",", properties.services()); + if (StringUtils.isNotEmpty(services)) { + environmentVariables.put("SERVICES", services); + } + return environmentVariables; + } + + private String getExternalHostName(final LocalstackDockerProperties properties) { + try { + IHostNameResolver hostNameResolver = properties.hostNameResolver().newInstance(); + String resolvedName = hostNameResolver.getHostName(); + + final String externalHostName = StringUtils.defaultIfBlank(resolvedName, "localhost"); + + LOG.info("External host name is set to: " + externalHostName); + return externalHostName; + } catch (InstantiationException | IllegalAccessException ex) { + throw new IllegalStateException("Unable to resolve hostname", ex); + } + } + +} diff --git a/src/main/java/cloud/localstack/docker/annotation/LocalstackDockerConfiguration.java b/src/main/java/cloud/localstack/docker/annotation/LocalstackDockerConfiguration.java new file mode 100644 index 0000000..100460e --- /dev/null +++ b/src/main/java/cloud/localstack/docker/annotation/LocalstackDockerConfiguration.java @@ -0,0 +1,43 @@ +package cloud.localstack.docker.annotation; + +import lombok.Builder; +import lombok.Data; + +import java.util.Collections; +import java.util.Map; + +/** + * Bean to specify the docker configuration. 
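+ *
+ * <p>Instances are normally obtained through the Lombok-generated builder, for
+ * example (values illustrative):
+ * <pre>{@code
+ * LocalstackDockerConfiguration config = LocalstackDockerConfiguration.builder()
+ *     .pullNewImage(true)
+ *     .randomizePorts(true)
+ *     .build();
+ * }</pre>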
+ * + * @author Patrick Allain + * @author Waldemar Hummer + * @author Omar Khammassi + */ +@Data +@Builder +public class LocalstackDockerConfiguration { + + public static final LocalstackDockerConfiguration DEFAULT = LocalstackDockerConfiguration.builder().build(); + + private final boolean pullNewImage; + + private final boolean randomizePorts; + + private final String imageTag; + + @Builder.Default + private final String externalHostName = "localhost"; + + @Builder.Default + private final Map environmentVariables = Collections.emptyMap(); + + @Builder.Default + private final Map portMappings = Collections.emptyMap(); + + @Builder.Default + private final boolean useSingleDockerContainer = false; + + @Builder.Default + private final boolean ignoreDockerRunErrors = false; + +} diff --git a/src/main/java/cloud/localstack/docker/annotation/LocalstackDockerProperties.java b/src/main/java/cloud/localstack/docker/annotation/LocalstackDockerProperties.java new file mode 100644 index 0000000..8615ecd --- /dev/null +++ b/src/main/java/cloud/localstack/docker/annotation/LocalstackDockerProperties.java @@ -0,0 +1,61 @@ +package cloud.localstack.docker.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import java.lang.annotation.Inherited; + +/** + * An annotation to provide parameters to the main (Docker-based) LocalstackTestRunner + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +@Inherited +public @interface LocalstackDockerProperties { + + /** + * Used for determining the host name of the machine running the docker containers + * so that the containers can be addressed. + */ + Class hostNameResolver() default LocalHostNameResolver.class; + + /** + * Used for injecting environment variables into the container. Implement a class that provides a map of the environment + * variables and they will be injected into the container on start-up + */ + Class environmentVariableProvider() default DefaultEnvironmentVariableProvider.class; + + /** + * Determines if a new image is pulled from the docker repo before the tests are run. + */ + boolean pullNewImage() default false; + + /** + * Determines if the container should expose the default local stack ports (4567-4583) or if it should expose randomized ports + * in order to prevent conflicts with other localstack containers running on the same machine + */ + boolean randomizePorts() default false; + + /** + * Determines which services should be run when the localstack starts. When empty, all the services available get + * up and running. + */ + String[] services() default {}; + + /** + * Use a specific image tag for docker container + */ + String imageTag() default ""; + + /** + * Determines if the singleton container should be used by all test classes + */ + boolean useSingleDockerContainer() default false; + + /** + * Determines if errors should be ignored when starting the Docker container. + * This can be used to run tests with an existing LocalStack container running on the host. 
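+ *
+ * <p>For example (illustrative):
+ * <pre>{@code
+ * @LocalstackDockerProperties(ignoreDockerRunErrors = true)
+ * }</pre>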
+ */ + boolean ignoreDockerRunErrors() default false; +} diff --git a/src/main/java/cloud/localstack/docker/command/Command.java b/src/main/java/cloud/localstack/docker/command/Command.java new file mode 100644 index 0000000..0cad3a3 --- /dev/null +++ b/src/main/java/cloud/localstack/docker/command/Command.java @@ -0,0 +1,18 @@ +package cloud.localstack.docker.command; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import cloud.localstack.docker.DockerExe; + +public abstract class Command { + + protected final DockerExe dockerExe = new DockerExe(); + + protected List options = new ArrayList<>(); + + protected void addOptions(String ...items) { + options.addAll(Arrays.asList(items)); + } +} diff --git a/src/main/java/cloud/localstack/docker/command/ExecCommand.java b/src/main/java/cloud/localstack/docker/command/ExecCommand.java new file mode 100644 index 0000000..595bcb4 --- /dev/null +++ b/src/main/java/cloud/localstack/docker/command/ExecCommand.java @@ -0,0 +1,21 @@ +package cloud.localstack.docker.command; + +import java.util.ArrayList; +import java.util.List; + +public class ExecCommand extends Command { + + private final String containerId; + + public ExecCommand(String containerId) { + this.containerId = containerId; + } + + public String execute(List command) { + List args = new ArrayList<>(); + args.add("exec"); + args.add(containerId); + args.addAll(command); + return dockerExe.execute(args); + } +} diff --git a/src/main/java/cloud/localstack/docker/command/ListImagesCommand.java b/src/main/java/cloud/localstack/docker/command/ListImagesCommand.java new file mode 100644 index 0000000..dc8380b --- /dev/null +++ b/src/main/java/cloud/localstack/docker/command/ListImagesCommand.java @@ -0,0 +1,11 @@ +package cloud.localstack.docker.command; + +import java.util.*; + +public class ListImagesCommand extends Command { + + public List execute() { + List params = Arrays.asList("images", "--format", "{{.Repository}}:{{.Tag}}"); + return Arrays.asList(dockerExe.execute(params).split("\n")); + } +} diff --git a/src/main/java/cloud/localstack/docker/command/LogCommand.java b/src/main/java/cloud/localstack/docker/command/LogCommand.java new file mode 100644 index 0000000..8a60b3f --- /dev/null +++ b/src/main/java/cloud/localstack/docker/command/LogCommand.java @@ -0,0 +1,29 @@ +package cloud.localstack.docker.command; + +import java.util.ArrayList; +import java.util.List; + +public class LogCommand extends Command { + + private final String containerId; + + public LogCommand(String containerId) { + this.containerId = containerId; + } + + + public String execute() { + List args = new ArrayList<>(); + args.add("logs"); + args.addAll(options); + args.add(containerId); + + return dockerExe.execute(args); + } + + + public LogCommand withNumberOfLines(Integer numberOfLines){ + this.addOptions("--tail", numberOfLines.toString()); + return this; + } +} diff --git a/src/main/java/cloud/localstack/docker/command/PortCommand.java b/src/main/java/cloud/localstack/docker/command/PortCommand.java new file mode 100644 index 0000000..915d060 --- /dev/null +++ b/src/main/java/cloud/localstack/docker/command/PortCommand.java @@ -0,0 +1,35 @@ +package cloud.localstack.docker.command; + +import java.util.Arrays; +import java.util.List; +import java.util.function.Function; +import java.util.regex.MatchResult; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +import cloud.localstack.docker.PortMapping; + +public class PortCommand extends Command { + + 
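+    // Parses `docker port <containerId>` output lines such as "4567/tcp -> 0.0.0.0:32768"
+    // (illustrative output; note the IP groups below match single-digit octets, which
+    // covers the usual 0.0.0.0 binding reported by docker).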
private static final Pattern PORT_MAPPING_PATTERN = Pattern.compile("(\\d+)/tcp -> ((\\d)(\\.(\\d)){3}):(\\d+)"); + private static final int INTERNAL_PORT_GROUP = 1; + private static final int EXTERNAL_PORT_GROUP = 6; + private static final int IP_GROUP = 2; + + private final String containerId; + + public PortCommand(String containerId) { + this.containerId = containerId; + } + + public List execute() { + String output = dockerExe.execute(Arrays.asList("port", containerId)); + + return new RegexStream(PORT_MAPPING_PATTERN.matcher(output)).stream() + .map(matchToPortMapping) + .collect(Collectors.toList()); + } + + private Function matchToPortMapping = m -> new PortMapping(m.group(IP_GROUP), m.group(EXTERNAL_PORT_GROUP), m.group(INTERNAL_PORT_GROUP)); + +} diff --git a/src/main/java/cloud/localstack/docker/command/PullCommand.java b/src/main/java/cloud/localstack/docker/command/PullCommand.java new file mode 100644 index 0000000..e051dda --- /dev/null +++ b/src/main/java/cloud/localstack/docker/command/PullCommand.java @@ -0,0 +1,27 @@ +package cloud.localstack.docker.command; + +import java.util.Arrays; + +public class PullCommand extends Command { + + private static final int PULL_COMMAND_TIMEOUT_MINUTES = 7; + private static final String LATEST_TAG = "latest"; + + private final String imageName; + + private final String imageTag; + + public PullCommand(String imageName) { + this(imageName, null); + } + + public PullCommand(String imageName, String imageTag) { + this.imageName = imageName; + this.imageTag = imageTag; + } + + public void execute() { + String image = String.format("%s:%s", imageName, imageTag == null ? LATEST_TAG : imageTag); + dockerExe.execute(Arrays.asList("pull", image), PULL_COMMAND_TIMEOUT_MINUTES); + } +} diff --git a/src/main/java/cloud/localstack/docker/command/RegexStream.java b/src/main/java/cloud/localstack/docker/command/RegexStream.java new file mode 100644 index 0000000..1465a61 --- /dev/null +++ b/src/main/java/cloud/localstack/docker/command/RegexStream.java @@ -0,0 +1,53 @@ +package cloud.localstack.docker.command; + +import java.util.Spliterator; +import java.util.function.Consumer; +import java.util.regex.Matcher; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +public class RegexStream { + + private final MatcherSpliterator matcherSpliterator; + + public RegexStream(Matcher matcher) { + this.matcherSpliterator = new MatcherSpliterator(matcher); + } + + public Stream stream(){ + return StreamSupport.stream(matcherSpliterator, false); + } + + + private class MatcherSpliterator implements Spliterator { + + private final Matcher matcher; + public MatcherSpliterator(Matcher matcher) { + this.matcher = matcher; + } + + @Override + public boolean tryAdvance(Consumer action) { + boolean found = matcher.find(); + if(found) { + action.accept(matcher); + } + return found; + } + + @Override + public Spliterator trySplit() { + return null; + } + + @Override + public long estimateSize() { + return 0; + } + + @Override + public int characteristics() { + return 0; + } + } +} diff --git a/src/main/java/cloud/localstack/docker/command/RunCommand.java b/src/main/java/cloud/localstack/docker/command/RunCommand.java new file mode 100644 index 0000000..e75873e --- /dev/null +++ b/src/main/java/cloud/localstack/docker/command/RunCommand.java @@ -0,0 +1,58 @@ +package cloud.localstack.docker.command; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +public class RunCommand extends Command { + 
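+    // Assembles and runs `docker run -d --rm [options] <image>[:<tag>]`; execute()
+    // returns the stdout of the docker CLI, i.e. the id of the started container.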
+ private static final int PULL_AND_RUN_TIMEOUT_MINUTES = 7; + + private final String imageName; + + private final String imageTag; + + public RunCommand(String imageName) { + this(imageName, null); + } + + public RunCommand(String imageName, String imageTag) { + this.imageName = imageName; + this.imageTag = imageTag; + } + + public String execute() { + List args = new ArrayList<>(); + args.add("run"); + args.add("-d"); + args.add("--rm"); + args.addAll(options); + args.add(imageTag == null ? imageName : String.format("%s:%s", imageName, imageTag)); + + // See details here: https://docs.docker.com/engine/reference/run/#exit-status + List errorCodes = Arrays.asList(125, 126, 127); + return dockerExe.execute(args, PULL_AND_RUN_TIMEOUT_MINUTES, errorCodes); + } + + public RunCommand withExposedPorts(String portsToExpose, boolean randomize) { + String portsOption = String.format("%s:%s", randomize ? "" : portsToExpose, portsToExpose ); + addOptions("-p", portsOption); + return this; + } + + public RunCommand withEnvironmentVariable(String name, String value) { + addEnvOption(name, value); + return this; + } + + public RunCommand withEnvironmentVariables(Map environmentVariables) { + environmentVariables.forEach((name, value) -> addEnvOption(name, value)); + return this; + } + + private void addEnvOption(String name, String value) { + addOptions("-e", String.format("%s=%s", name, value)); + } + +} diff --git a/src/main/java/cloud/localstack/docker/command/StopCommand.java b/src/main/java/cloud/localstack/docker/command/StopCommand.java new file mode 100644 index 0000000..fd7809a --- /dev/null +++ b/src/main/java/cloud/localstack/docker/command/StopCommand.java @@ -0,0 +1,16 @@ +package cloud.localstack.docker.command; + +import java.util.Arrays; + +public class StopCommand extends Command { + + private final String containerId; + + public StopCommand(String containerId) { + this.containerId = containerId; + } + + public void execute() { + dockerExe.execute(Arrays.asList("stop", containerId)); + } +} diff --git a/src/main/java/cloud/localstack/docker/exception/LocalstackDockerException.java b/src/main/java/cloud/localstack/docker/exception/LocalstackDockerException.java new file mode 100644 index 0000000..743ad82 --- /dev/null +++ b/src/main/java/cloud/localstack/docker/exception/LocalstackDockerException.java @@ -0,0 +1,8 @@ +package cloud.localstack.docker.exception; + +public class LocalstackDockerException extends RuntimeException { + + public LocalstackDockerException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/src/main/java/cloud/localstack/lambda/DDBEventParser.java b/src/main/java/cloud/localstack/lambda/DDBEventParser.java new file mode 100644 index 0000000..946fe2e --- /dev/null +++ b/src/main/java/cloud/localstack/lambda/DDBEventParser.java @@ -0,0 +1,132 @@ +package cloud.localstack.lambda; + +import com.amazonaws.services.dynamodbv2.model.*; +import com.amazonaws.services.lambda.runtime.events.DynamodbEvent; + +import java.nio.ByteBuffer; +import java.util.*; +import java.util.stream.Collectors; + +import static cloud.localstack.LambdaExecutor.get; + +public class DDBEventParser { + + public static DynamodbEvent parse(List> records) { + + DynamodbEvent dynamoDbEvent = new DynamodbEvent(); + dynamoDbEvent.setRecords(new LinkedList<>()); + for (Map record : records) { + DynamodbEvent.DynamodbStreamRecord r = new DynamodbEvent.DynamodbStreamRecord(); + dynamoDbEvent.getRecords().add(r); + + r.setEventSourceARN((String) get(record, "eventSourceARN")); + 
r.setEventSource((String) get(record, "eventSource")); + r.setEventName(OperationType.fromValue((String) get(record, "eventName"))); + r.setEventVersion((String) get(record, "eventVersion")); + + r.setEventID((String) get(record, "eventID")); + r.setAwsRegion((String) get(record, "awsRegion")); + r.setUserIdentity((Identity) get(record, "userIdentity")); + + Map ddbMap = (Map) record.get("dynamodb"); + + //DynamodbEvent + StreamRecord streamRecord = new StreamRecord(); + r.setDynamodb(streamRecord); + + Date date = (Date) get(ddbMap, "approximateCreationDateTime"); + streamRecord.setApproximateCreationDateTime(date != null ? date : new Date()); + + streamRecord.setSequenceNumber(UUID.randomUUID().toString()); + + streamRecord.setKeys(fromSimpleMap((Map) get(ddbMap, "Keys"))); + streamRecord.setNewImage(fromSimpleMap((Map) get(ddbMap, "NewImage"))); + streamRecord.setOldImage(fromSimpleMap((Map) get(ddbMap, "OldImage"))); + + streamRecord.setSizeBytes(((Integer) get(ddbMap, "SizeBytes")).longValue()); + streamRecord.setStreamViewType((String) get(ddbMap, "StreamViewType")); + + } + + return dynamoDbEvent; + } + + public static Map fromSimpleMap(Map map) { + if(map == null) { + return null; + } else { + LinkedHashMap result = new LinkedHashMap<>(); + map.entrySet().stream().forEach(entry -> + result.put(entry.getKey(),toAttributeValue(entry.getValue())) + ); + + return result; + } + } + + /** + * Reads a previously created Map of Maps in cloud.localstack.LambdaExecutor into Attribute Value + * @param value the object which is expected to be Map + * @return parsed AttributeValue + */ + public static AttributeValue toAttributeValue(Object value) { + + AttributeValue result = new AttributeValue(); + + if(value instanceof Map) { + Map.Entry entry = ((Map) value).entrySet().iterator().next(); + String key = entry.getKey(); + + switch (key) { + case "M": + Map in1 = (Map) entry.getValue(); + result.setM(new LinkedHashMap<>()); + in1.entrySet().stream().forEach(mapEntry -> + result.addMEntry(mapEntry.getKey(), toAttributeValue(mapEntry.getValue())) + ); + break; + case "SS": + result.setSS((List) entry.getValue()); + break; + case "BS": + List in2 = (List) entry.getValue(); + result.setBS(in2.stream() + .map(element -> ByteBuffer.wrap(element.getBytes())) + .collect(Collectors.toList())); + break; + case "NS": + List in3 = (List) entry.getValue(); + result.setNS(in3.stream().map(Object::toString).collect(Collectors.toList())); + break; + case "L": + List in4 =(List) entry.getValue(); + result.setL(in4.stream() + .map(el -> toAttributeValue(el)) + .collect(Collectors.toList()) + ); + break; + case "NULL": + result.withNULL(Boolean.parseBoolean(entry.getValue().toString())); + break; + case "BOOL": + result.withBOOL(Boolean.parseBoolean(entry.getValue().toString())); + break; + case "S": + result.withS((String) entry.getValue()); + break; + case "N": + String stringValue = entry.getValue().toString(); + result.withN(stringValue); + break; + case "B": + result.withBS(ByteBuffer.wrap(entry.getValue().toString().getBytes())); + break; + default: + result.setM(new LinkedHashMap<>()); + break; + } + } + return result; + } + +} diff --git a/src/main/java/cloud/localstack/lambda/S3EventParser.java b/src/main/java/cloud/localstack/lambda/S3EventParser.java new file mode 100644 index 0000000..8fc1877 --- /dev/null +++ b/src/main/java/cloud/localstack/lambda/S3EventParser.java @@ -0,0 +1,95 @@ +package cloud.localstack.lambda; +import static cloud.localstack.LambdaExecutor.get; +import 
com.amazonaws.services.lambda.runtime.events.S3Event; +import com.amazonaws.services.s3.event.S3EventNotification; +import org.joda.time.DateTime; + +import java.util.*; + + +public class S3EventParser { + + public static S3Event parse(List> records) { + + // parse out items to construct the S3EventNotification + Map record = records.get(0); + Map rp = (Map) get(record, "requestParameters"); + String sip = (String) get(rp,"sourceIPAddress"); + + Map re = (Map) get(record, "responseElements"); + String xAmzld2 = (String) get(re,"x-amz-id-2"); + String xAmzRequestId = (String) get(re,"x-amz-request-id"); + + Map s3 = (Map) get(record, "s3"); + Map bk = (Map) get(s3, "bucket"); + Map oi = (Map) get(bk, "ownerIdentity"); + String bucketPrincipalId = (String) get(oi, "principalId"); + String bucketName = (String) get(bk,"name"); + String arn = (String) get(bk,"arn"); + String s3SchemaVersion = (String) get(s3, "s3SchemaVersion"); + + Map obj = (Map) get(s3, "object"); + String key = (String) get(obj,"key"); + Long size = ((Number) get(obj,"size")).longValue(); + String eTag = (String) get(obj,"eTag"); + String versionId = (String) get(obj,"versionId"); + String sequencer = (String) get(obj,"sequencer"); + String configurationId = (String) get(s3,"configurationId"); + + String awsRegion = (String) get(record, "awsRegion"); + String eventName = (String) get(record, "eventName"); + String eventSource = (String) get(record, "eventSource"); + String eventTime = (String) get(record, "eventTime"); + String eventVersion = (String) get(record, "eventVersion"); + + Map ui = (Map) get(record, "userIdentity"); + String principalId = (String) get(ui,"principalId"); + + // build up a S3Event to be passed to the Lambda + List s3Records = new LinkedList<>(); + + // bucket and S3ObjectEntity needed for S3Entity constructor + S3EventNotification.UserIdentityEntity bucketUserIdentityEntity = new S3EventNotification.UserIdentityEntity(bucketPrincipalId); + S3EventNotification.S3BucketEntity bucket = new S3EventNotification.S3BucketEntity( + bucketName, + bucketUserIdentityEntity, + arn); + + S3EventNotification.S3ObjectEntity s3ObjectEntity = new S3EventNotification.S3ObjectEntity( + key, + size, + eTag, + versionId, + sequencer); + + // S3Entity + S3EventNotification.S3Entity s3Entity = new S3EventNotification.S3Entity( + configurationId, + bucket, + s3ObjectEntity, + s3SchemaVersion); + + // build S3EventNotificationRecord + S3EventNotification.RequestParametersEntity requestParameters = new S3EventNotification.RequestParametersEntity(sip); + S3EventNotification.ResponseElementsEntity responseEntity = new S3EventNotification.ResponseElementsEntity(xAmzld2, xAmzRequestId); + S3EventNotification.UserIdentityEntity eventNotifyUserIdentityEntity = new S3EventNotification.UserIdentityEntity(principalId); + S3EventNotification.S3EventNotificationRecord s3record = new S3EventNotification.S3EventNotificationRecord( + awsRegion, + eventName, + eventSource, + eventTime, + eventVersion, + requestParameters, + responseEntity, + s3Entity, + eventNotifyUserIdentityEntity); + + // add the record to records list + s3Records.add(0, s3record); + + // finally hydrate S3Event + return new S3Event(s3Records); + + } + +} diff --git a/src/main/java/org/ow2/proactive/process_tree_killer/CaseInsensitiveComparator.java b/src/main/java/org/ow2/proactive/process_tree_killer/CaseInsensitiveComparator.java new file mode 100644 index 0000000..f01bfde --- /dev/null +++ 
b/src/main/java/org/ow2/proactive/process_tree_killer/CaseInsensitiveComparator.java @@ -0,0 +1,48 @@ +/* + * ProActive Parallel Suite(TM): + * The Open Source library for parallel and distributed + * Workflows & Scheduling, Orchestration, Cloud Automation + * and Big Data Analysis on Enterprise Grids & Clouds. + * + * Copyright (c) 2007 - 2017 ActiveEon + * Contact: contact@activeeon.com + * + * This library is free software: you can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation: version 3 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + * If needed, contact us to obtain a release under GPL Version 2 or 3 + * or a different license than the AGPL. + */ +package org.ow2.proactive.process_tree_killer; + +import java.io.Serializable; +import java.util.Comparator; + + +/** + * Case-insensitive string comparator. + * + * @author Kohsuke Kawaguchi + */ +public final class CaseInsensitiveComparator implements Comparator, Serializable { + public static final Comparator INSTANCE = new CaseInsensitiveComparator(); + + private CaseInsensitiveComparator() { + } + + public int compare(String lhs, String rhs) { + return lhs.compareToIgnoreCase(rhs); + } + + private static final long serialVersionUID = 1L; +} diff --git a/src/main/java/org/ow2/proactive/process_tree_killer/CyclicGraphDetector.java b/src/main/java/org/ow2/proactive/process_tree_killer/CyclicGraphDetector.java new file mode 100644 index 0000000..62bee62 --- /dev/null +++ b/src/main/java/org/ow2/proactive/process_tree_killer/CyclicGraphDetector.java @@ -0,0 +1,114 @@ +/* + * ProActive Parallel Suite(TM): + * The Open Source library for parallel and distributed + * Workflows & Scheduling, Orchestration, Cloud Automation + * and Big Data Analysis on Enterprise Grids & Clouds. + * + * Copyright (c) 2007 - 2017 ActiveEon + * Contact: contact@activeeon.com + * + * This library is free software: you can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation: version 3 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + * If needed, contact us to obtain a release under GPL Version 2 or 3 + * or a different license than the AGPL. + */ +package org.ow2.proactive.process_tree_killer; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.Stack; + + +/** + * Traverses a directed graph and if it contains any cycle, throw an exception. 
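+ *
+ * <p>Subclasses only need to supply the outgoing edges, for example (the element
+ * type and the {@code dependenciesOf} helper are purely illustrative):
+ * <pre>{@code
+ * CyclicGraphDetector<String> detector = new CyclicGraphDetector<String>() {
+ *     protected Iterable<String> getEdges(String node) { return dependenciesOf(node); }
+ * };
+ * detector.run(allNodes); // throws CycleDetectedException if a cycle is found
+ * }</pre>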
+ * + * @author Kohsuke Kawaguchi + */ +@SuppressWarnings("all") +public abstract class CyclicGraphDetector { + private final Set visited = new HashSet(); + + private final Set visiting = new HashSet(); + + private final Stack path = new Stack(); + + private final List topologicalOrder = new ArrayList(); + + public void run(Iterable allNodes) throws CycleDetectedException { + for (N n : allNodes) { + visit(n); + } + } + + /** + * Returns all the nodes in the topologically sorted order. + * That is, if there's an edge a->b, b always come earlier than a. + */ + public List getSorted() { + return topologicalOrder; + } + + /** + * List up edges from the given node (by listing nodes that those edges point to.) + * + * @return + * Never null. + */ + protected abstract Iterable getEdges(N n); + + private void visit(N p) throws CycleDetectedException { + if (!visited.add(p)) + return; + + visiting.add(p); + path.push(p); + for (N q : getEdges(p)) { + if (q == null) + continue; // ignore unresolved references + if (visiting.contains(q)) + detectedCycle(q); + visit(q); + } + visiting.remove(p); + path.pop(); + topologicalOrder.add(p); + } + + private void detectedCycle(N q) throws CycleDetectedException { + int i = path.indexOf(q); + path.push(q); + reactOnCycle(q, path.subList(i, path.size())); + } + + /** + * React on detected cycles - default implementation throws an exception. + * @param q + * @param cycle + * @throws CycleDetectedException + */ + protected void reactOnCycle(N q, List cycle) throws CycleDetectedException { + throw new CycleDetectedException(cycle); + } + + public static final class CycleDetectedException extends Exception { + public final List cycle; + + public CycleDetectedException(List cycle) { + super("Cycle detected: " + Util.join(cycle, " -> ")); + this.cycle = cycle; + } + } +} diff --git a/src/main/java/org/ow2/proactive/process_tree_killer/EnvVars.java b/src/main/java/org/ow2/proactive/process_tree_killer/EnvVars.java new file mode 100644 index 0000000..42ed5b5 --- /dev/null +++ b/src/main/java/org/ow2/proactive/process_tree_killer/EnvVars.java @@ -0,0 +1,441 @@ +/* + * ProActive Parallel Suite(TM): + * The Open Source library for parallel and distributed + * Workflows & Scheduling, Orchestration, Cloud Automation + * and Big Data Analysis on Enterprise Grids & Clouds. + * + * Copyright (c) 2007 - 2017 ActiveEon + * Contact: contact@activeeon.com + * + * This library is free software: you can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation: version 3 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + * If needed, contact us to obtain a release under GPL Version 2 or 3 + * or a different license than the AGPL. + */ +package org.ow2.proactive.process_tree_killer; + +import java.io.File; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.UUID; +import java.util.logging.Logger; + + +/** + * Environment variables. + * + *
<p>
+ * While all the platforms I tested (Linux 2.6, Solaris, and Windows XP) have the case sensitive + * environment variable table, Windows batch script handles environment variable in the case preserving + * but case insensitive way (that is, cmd.exe can get both FOO and foo as environment variables + * when it's launched, and the "set" command will display it accordingly, but "echo %foo%" results in + * echoing the value of "FOO", not "foo" — this is presumably caused by the behavior of the underlying + * Win32 API GetEnvironmentVariable acting in case insensitive way.) Windows users are also + * used to write environment variable case-insensitively (like %Path% vs %PATH%), and you can see many + * documents on the web that claims Windows environment variables are case insensitive. + * + *
<p>
+ * So for a consistent cross platform behavior, it creates the least confusion to make the table + * case insensitive but case preserving. + * + *
<p>
+ * In Jenkins, often we need to build up "environment variable overrides" + * on master, then to execute the process on slaves. This causes a problem + * when working with variables like PATH. So to make this work, + * we introduce a special convention PATH+FOO — all entries + * that starts with PATH+ are merged and prepended to the inherited + * PATH variable, on the process where a new process is executed. + * + * @author Kohsuke Kawaguchi + */ +@SuppressWarnings("all") +public class EnvVars extends TreeMap { + private static Logger LOGGER = Logger.getLogger(EnvVars.class.getName()); + + /** + * If this {@link EnvVars} object represents the whole environment variable set, + * not just a partial list used for overriding later, then we need to know + * the platform for which this env vars are targeted for, or else we won't know + * how to merge variables properly. + * + *

+ * So this property remembers that information. + */ + private Platform platform; + + public EnvVars() { + super(CaseInsensitiveComparator.INSTANCE); + } + + public EnvVars(Map m) { + this(); + putAll(m); + + // for backward compatibility, some parts of Jenkins pass + // EnvVars as Map, so downcasting is safer. + if (m instanceof EnvVars) { + EnvVars lhs = (EnvVars) m; + this.platform = lhs.platform; + } + } + + public EnvVars(EnvVars m) { + // this constructor is so that in the future we can get rid of the downcasting. + this((Map) m); + } + + /** + * Builds environment variables from an array of the form "key","value","key","value"... + */ + public EnvVars(String... keyValuePairs) { + this(); + if (keyValuePairs.length % 2 != 0) + throw new IllegalArgumentException(Arrays.asList(keyValuePairs).toString()); + for (int i = 0; i < keyValuePairs.length; i += 2) + put(keyValuePairs[i], keyValuePairs[i + 1]); + } + + /** + * Overrides the current entry with the given entry. + * + *

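+ * An illustrative sketch (hypothetical values): override("JAVA_HOME", "/opt/jdk") replaces any
+ * existing JAVA_HOME entry, and override("JAVA_HOME", null) or an empty value removes it.
+ * With an inherited PATH=/usr/bin, override("PATH+MAVEN", "/opt/maven/bin") leaves
+ * PATH=/opt/maven/bin:/usr/bin on a Unix platform, as described below.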
+ * Handles PATH+XYZ notation. + */ + public void override(String key, String value) { + if (value == null || value.length() == 0) { + remove(key); + return; + } + + int idx = key.indexOf('+'); + if (idx > 0) { + String realKey = key.substring(0, idx); + String v = get(realKey); + if (v == null) + v = value; + else { + // we might be handling environment variables for a slave that can have different path separator + // than the master, so the following is an attempt to get it right. + // it's still more error prone that I'd like. + char ch = platform == null ? File.pathSeparatorChar : platform.pathSeparator; + v = value + ch + v; + } + put(realKey, v); + return; + } + + put(key, value); + } + + /** + * Overrides all values in the map by the given map. + * See {@link #override(String, String)}. + * @return this + */ + public EnvVars overrideAll(Map all) { + for (Map.Entry e : all.entrySet()) { + override(e.getKey(), e.getValue()); + } + return this; + } + + /** + * Calculates the order to override variables. + * + * Sort variables with topological sort with their reference graph. + * + * This is package accessible for testing purpose. + */ + static class OverrideOrderCalculator { + /** + * Extract variables referred directly from a variable. + */ + private static class TraceResolver implements VariableResolver { + private final Comparator comparator; + + public Set referredVariables; + + public TraceResolver(Comparator comparator) { + this.comparator = comparator; + clear(); + } + + public void clear() { + referredVariables = new TreeSet(comparator); + } + + public String resolve(String name) { + referredVariables.add(name); + return ""; + } + } + + private static class VariableReferenceSorter extends CyclicGraphDetector { + // map from a variable to a set of variables that variable refers. + private final Map> refereeSetMap; + + public VariableReferenceSorter(Map> refereeSetMap) { + this.refereeSetMap = refereeSetMap; + } + + @Override + protected Iterable getEdges(String n) { + // return variables referred from the variable. + if (!refereeSetMap.containsKey(n)) { + // there is a case a non-existing variable is referred... + return Collections.emptySet(); + } + return refereeSetMap.get(n); + } + }; + + private final Comparator comparator; + + private final EnvVars target; + + private final Map overrides; + + private Map> refereeSetMap; + + private List orderedVariableNames; + + public OverrideOrderCalculator(EnvVars target, Map overrides) { + comparator = target.comparator(); + this.target = target; + this.overrides = overrides; + scan(); + } + + public List getOrderedVariableNames() { + return orderedVariableNames; + } + + // Cut the reference to the variable in a cycle. + private void cutCycleAt(String referee, List cycle) { + // cycle contains variables in referrer-to-referee order. + // This should not be negative, for the first and last one is same. + int refererIndex = cycle.lastIndexOf(referee) - 1; + + assert (refererIndex >= 0); + String referrer = cycle.get(refererIndex); + boolean removed = refereeSetMap.get(referrer).remove(referee); + assert (removed); + LOGGER.warning(String.format("Cyclic reference detected: %s", Util.join(cycle, " -> "))); + LOGGER.warning(String.format("Cut the reference %s -> %s", referrer, referee)); + } + + // Cut the variable reference in a cycle. 
+ private void cutCycle(List cycle) { + // if an existing variable is contained in that cycle, + // cut the cycle with that variable: + // existing: + // PATH=/usr/bin + // overriding: + // PATH1=/usr/local/bin:${PATH} + // PATH=/opt/something/bin:${PATH1} + // then consider reference PATH1 -> PATH can be ignored. + for (String referee : cycle) { + if (target.containsKey(referee)) { + cutCycleAt(referee, cycle); + return; + } + } + + // if not, cut the reference to the first one. + cutCycleAt(cycle.get(0), cycle); + } + + /** + * Scan all variables and list all referring variables. + */ + public void scan() { + refereeSetMap = new TreeMap>(comparator); + List extendingVariableNames = new ArrayList(); + + TraceResolver resolver = new TraceResolver(comparator); + + for (Map.Entry entry : overrides.entrySet()) { + if (entry.getKey().indexOf('+') > 0) { + // XYZ+AAA variables should be always processed in last. + extendingVariableNames.add(entry.getKey()); + continue; + } + resolver.clear(); + Util.replaceMacro(entry.getValue(), resolver); + + // Variables directly referred from the current scanning variable. + Set refereeSet = resolver.referredVariables; + // Ignore self reference. + refereeSet.remove(entry.getKey()); + refereeSetMap.put(entry.getKey(), refereeSet); + } + + VariableReferenceSorter sorter; + while (true) { + sorter = new VariableReferenceSorter(refereeSetMap); + try { + sorter.run(refereeSetMap.keySet()); + } catch (CyclicGraphDetector.CycleDetectedException e) { + // cyclic reference found. + // cut the cycle and retry. + @SuppressWarnings("unchecked") + List cycle = e.cycle; + cutCycle(cycle); + continue; + } + break; + } + + // When A refers B, the last appearance of B always comes after + // the last appearance of A. + List reversedDuplicatedOrder = new ArrayList(sorter.getSorted()); + Collections.reverse(reversedDuplicatedOrder); + + orderedVariableNames = new ArrayList(overrides.size()); + for (String key : reversedDuplicatedOrder) { + if (overrides.containsKey(key) && !orderedVariableNames.contains(key)) { + orderedVariableNames.add(key); + } + } + Collections.reverse(orderedVariableNames); + orderedVariableNames.addAll(extendingVariableNames); + } + } + + /** + * Overrides all values in the map by the given map. Expressions in values will be expanded. + * See {@link #override(String, String)}. + * @return this + */ + public EnvVars overrideExpandingAll(Map all) { + for (String key : new OverrideOrderCalculator(this, all).getOrderedVariableNames()) { + override(key, expand(all.get(key))); + } + return this; + } + + /** + * Resolves environment variables against each other. + */ + public static void resolve(Map env) { + for (Map.Entry entry : env.entrySet()) { + entry.setValue(Util.replaceMacro(entry.getValue(), env)); + } + } + + /** + * Convenience message + * @since 1.485 + **/ + public String get(String key, String defaultValue) { + String v = get(key); + if (v == null) + v = defaultValue; + return v; + } + + @Override + public String put(String key, String value) { + if (value == null) + throw new IllegalArgumentException("Null value not allowed as an environment variable: " + key); + return super.put(key, value); + } + + /** + * Add a key/value but only if the value is not-null. Otherwise no-op. + * @since 1.556 + */ + public void putIfNotNull(String key, String value) { + if (value != null) + put(key, value); + } + + /** + * Takes a string that looks like "a=b" and adds that to this map. 
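+ * An illustrative sketch (hypothetical values): addLine("LANG=en_US.UTF-8") behaves like
+ * put("LANG", "en_US.UTF-8"); a line without '=' (or one that starts with '=') is silently ignored.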
+ */ + public void addLine(String line) { + int sep = line.indexOf('='); + if (sep > 0) { + put(line.substring(0, sep), line.substring(sep + 1)); + } + } + + /** + * Expands the variables in the given string by using environment variables represented in 'this'. + */ + public String expand(String s) { + return Util.replaceMacro(s, this); + } + + /** + * Creates a magic cookie that can be used as the model environment variable + * when we later kill the processes. + */ + public static EnvVars createCookie() { + return new EnvVars("HUDSON_COOKIE", UUID.randomUUID().toString()); + } + + // /** + // * Obtains the environment variables of a remote peer. + // * + // * @param channel + // * Can be null, in which case the map indicating "N/A" will be returned. + // * @return + // * A fresh copy that can be owned and modified by the caller. + // */ + // public static EnvVars getRemote(VirtualChannel channel) throws IOException, InterruptedException { + // if(channel==null) + // return new EnvVars("N/A","N/A"); + // return channel.call(new GetEnvVars()); + // } + + // private static final class GetEnvVars extends MasterToSlaveCallable { + // public EnvVars call() { + // return new EnvVars(EnvVars.masterEnvVars); + // } + // private static final long serialVersionUID = 1L; + // } + + /** + * Environmental variables that we've inherited. + * + *

+ * Despite what the name might imply, these are the environment variables + * of the current JVM process. They are therefore the Jenkins master's environment + * variables only when you access this from the master. + * + *

+ * If you access this field from slaves, then this is the environment + * variable of the slave agent. + */ + // public static final Map masterEnvVars = initMaster(); + + // private static EnvVars initMaster() { + // EnvVars vars = new EnvVars(System.getenv()); + // vars.platform = Platform.current(); + // if(Main.isUnitTest || Main.isDevelopmentMode) + // // if unit test is launched with maven debug switch, + // // we need to prevent forked Maven processes from seeing it, or else + // // they'll hang + // vars.remove("MAVEN_OPTS"); + // return vars; + // } +} diff --git a/src/main/java/org/ow2/proactive/process_tree_killer/Platform.java b/src/main/java/org/ow2/proactive/process_tree_killer/Platform.java new file mode 100644 index 0000000..2a090b6 --- /dev/null +++ b/src/main/java/org/ow2/proactive/process_tree_killer/Platform.java @@ -0,0 +1,79 @@ +/* + * ProActive Parallel Suite(TM): + * The Open Source library for parallel and distributed + * Workflows & Scheduling, Orchestration, Cloud Automation + * and Big Data Analysis on Enterprise Grids & Clouds. + * + * Copyright (c) 2007 - 2017 ActiveEon + * Contact: contact@activeeon.com + * + * This library is free software: you can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation: version 3 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + * If needed, contact us to obtain a release under GPL Version 2 or 3 + * or a different license than the AGPL. + */ +package org.ow2.proactive.process_tree_killer; + +import java.io.File; +import java.util.Locale; + + +/** + * Strategy object that absorbs the platform differences. + * + *

+ * Do not switch/case on this enum, or do a comparison, as we may add new constants. + * + * @author Kohsuke Kawaguchi + */ +public enum Platform { + WINDOWS(';'), + UNIX(':'); + + /** + * The character that separates paths in environment variables like PATH and CLASSPATH. + * On Windows ';' and on Unix ':'. + * + * @see File#pathSeparator + */ + public final char pathSeparator; + + private Platform(char pathSeparator) { + this.pathSeparator = pathSeparator; + } + + public static Platform current() { + if (File.pathSeparatorChar == ':') + return UNIX; + return WINDOWS; + } + + public static boolean isDarwin() { + // according to http://developer.apple.com/technotes/tn2002/tn2110.html + return System.getProperty("os.name").toLowerCase(Locale.ENGLISH).startsWith("mac"); + } + + /** + * Returns true if we run on Mac OS X >= 10.6 + */ + public static boolean isSnowLeopardOrLater() { + try { + return isDarwin() && + new VersionNumber(System.getProperty("os.version")).compareTo(new VersionNumber("10.6")) >= 0; + } catch (IllegalArgumentException e) { + // failed to parse the version + return false; + } + } +} diff --git a/src/main/java/org/ow2/proactive/process_tree_killer/ProcessKiller.java b/src/main/java/org/ow2/proactive/process_tree_killer/ProcessKiller.java new file mode 100644 index 0000000..18a2204 --- /dev/null +++ b/src/main/java/org/ow2/proactive/process_tree_killer/ProcessKiller.java @@ -0,0 +1,77 @@ +/* + * ProActive Parallel Suite(TM): + * The Open Source library for parallel and distributed + * Workflows & Scheduling, Orchestration, Cloud Automation + * and Big Data Analysis on Enterprise Grids & Clouds. + * + * Copyright (c) 2007 - 2017 ActiveEon + * Contact: contact@activeeon.com + * + * This library is free software: you can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation: version 3 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + * If needed, contact us to obtain a release under GPL Version 2 or 3 + * or a different license than the AGPL. + */ +package org.ow2.proactive.process_tree_killer; + +import java.io.IOException; +import java.io.Serializable; + + +/** + * Extension point that defines more elaborate way of killing processes, such as + * sudo or pfexec, for {@link ProcessTree}. + * + *
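+ * A minimal, illustrative sketch (the class name and command are hypothetical, not part of this
+ * library): an implementation could delegate to an external command, e.g.
+ *
+ *   public class SudoKiller extends ProcessKiller {
+ *       public boolean kill(ProcessTree.OSProcess process) throws IOException, InterruptedException {
+ *           // returning true tells the caller that no other ProcessKiller needs to be tried
+ *           return new ProcessBuilder("sudo", "kill", String.valueOf(process.getPid()))
+ *                   .start().waitFor() == 0;
+ *       }
+ *   }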

+ * Lifecycle: + *

+ * Each implementation of {@link ProcessKiller} is instantiated once on the master. + * Whenever a process needs to be killed, those implementations are serialized and sent over + * to the appropriate slave, then the {@link #kill(ProcessTree.OSProcess)} method is invoked + * to attempt to kill the process. + * + *

+ * One of the consequences of this design is that the implementation should be stateless + * and concurrent-safe. That is, the {@link #kill(ProcessTree.OSProcess)} method can be invoked by multiple threads + * concurrently on the single instance. + * + *

+ * Another consequence of this design is that if your {@link ProcessKiller} requires configuration, + * it needs to be serializable, and configuration needs to be updated atomically, as another + * thread may be calling into {@link #kill(ProcessTree.OSProcess)} just when you are updating your configuration. + * + * @author jpederzolli + * @author Kohsuke Kawaguchi + * @since 1.362 + */ +public abstract class ProcessKiller implements Serializable { + + /** + * Attempts to kill the given process. + * + * @param process process to be killed. Always a {@linkplain ProcessTree.Local local process}. + * @return + * true if the killing was successful, and Hudson won't try to use other {@link ProcessKiller} + * implementations to kill the process. false if the killing failed or is unattempted, and Hudson will continue + * to use the rest of the {@link ProcessKiller} implementations to try to kill the process. + * @throws IOException + * The caller will log this exception and otherwise treat as if the method returned false, and moves on + * to the next killer. + * @throws InterruptedException + * if the callee performs a time consuming operation and if the thread is canceled, do not catch + * {@link InterruptedException} and just let it thrown from the method. + */ + public abstract boolean kill(ProcessTree.OSProcess process) throws IOException, InterruptedException; + + private static final long serialVersionUID = 1L; +} diff --git a/src/main/java/org/ow2/proactive/process_tree_killer/ProcessTree.java b/src/main/java/org/ow2/proactive/process_tree_killer/ProcessTree.java new file mode 100644 index 0000000..a24822f --- /dev/null +++ b/src/main/java/org/ow2/proactive/process_tree_killer/ProcessTree.java @@ -0,0 +1,1182 @@ +/* + * ProActive Parallel Suite(TM): + * The Open Source library for parallel and distributed + * Workflows & Scheduling, Orchestration, Cloud Automation + * and Big Data Analysis on Enterprise Grids & Clouds. + * + * Copyright (c) 2007 - 2017 ActiveEon + * Contact: contact@activeeon.com + * + * This library is free software: you can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation: version 3 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + * If needed, contact us to obtain a release under GPL Version 2 or 3 + * or a different license than the AGPL. + */ +package org.ow2.proactive.process_tree_killer; + +import com.sun.jna.Memory; +import com.sun.jna.Native; +import com.sun.jna.ptr.IntByReference; +import org.jvnet.winp.WinProcess; +import org.jvnet.winp.WinpException; + +import java.io.*; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.nio.file.Files; +import java.rmi.Remote; +import java.util.*; +import java.util.Map.Entry; +import java.util.logging.Level; +import java.util.logging.Logger; + +import static com.sun.jna.Pointer.NULL; +import static java.util.logging.Level.*; +import static org.ow2.proactive.process_tree_killer.jna.GNUCLibrary.LIBC; + + +/** + * Represents a snapshot of the process tree of the current system. + * + *

+ * A {@link ProcessTree} is really conceptually a map from process ID to an {@link OSProcess} object. + * When Hudson runs on platforms that support process introspection, this allows you to introspect + * and do some useful things on processes. On other platforms, the implementation falls back to + * "do nothing" behavior. + * + *
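+ * An illustrative usage sketch (the command is hypothetical): a caller typically marks the child
+ * with a cookie variable and later kills everything that inherited it, e.g.
+ *
+ *   EnvVars cookie = EnvVars.createCookie();
+ *   ProcessBuilder pb = new ProcessBuilder("some-command");
+ *   pb.environment().putAll(cookie);
+ *   Process proc = pb.start();
+ *   // ... later ...
+ *   ProcessTree.get().killAll(proc, cookie);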

+ * {@link ProcessTree} is remotable. + * + * @author Kohsuke Kawaguchi + * @since 1.315 + */ +@SuppressWarnings("all") +public abstract class ProcessTree + implements Iterable, ProcessTreeRemoting.IProcessTree, Serializable { + /** + * To be filled in the constructor of the derived type. + */ + protected final Map processes = new HashMap(); + + // instantiation only allowed for subtypes in this class + private ProcessTree() { + } + + /** + * Gets the process given a specific ID, or null if no such process exists. + */ + public final OSProcess get(int pid) { + return processes.get(pid); + } + + /** + * Lists all the processes in the system. + */ + public final Iterator iterator() { + return processes.values().iterator(); + } + + /** + * Try to convert {@link Process} into this process object + * or null if it fails (for example, maybe the snapshot is taken after + * this process has already finished.) + */ + public abstract OSProcess get(Process proc); + + /** + * Kills all the processes that have matching environment variables. + * + *

+ * In this method, the implementation is given a set of + * "model environment variables", which is a list of environment variables + * and their values that are characteristic of the launched process. + * The implementation is expected to find processes + * in the system that inherit these environment variables, and kill + * them all. This is suitable for locating daemon processes + * that cannot be tracked by the regular ancestor/descendant relationship. + */ + public abstract void killAll(Map modelEnvVars) throws InterruptedException; + + /** + * Convenience method that does {@link #killAll(Map)} and {@link OSProcess#killRecursively()}. + * This is necessary to reliably kill the process and its descendants, as some OSes + * may not implement {@link #killAll(Map)}. + * + * Either of the parameters can be null. + */ + public void killAll(Process proc, Map modelEnvVars) throws InterruptedException { + LOGGER.fine("killAll: process=" + proc + " and envs=" + modelEnvVars); + OSProcess p = get(proc); + if (p != null) + p.killRecursively(); + if (modelEnvVars != null) + killAll(modelEnvVars); + } + + /** + * Represents a process. + */ + public abstract class OSProcess implements ProcessTreeRemoting.IOSProcess, Serializable { + final int pid; + + // instantiation only allowed for subtypes in this class + private OSProcess(int pid) { + this.pid = pid; + } + + public final int getPid() { + return pid; + } + + /** + * Gets the parent process. This method may return null, because + * there's no guarantee that we are getting a consistent snapshot + * of the whole system state. + */ + public abstract OSProcess getParent(); + + /* package */ final ProcessTree getTree() { + return ProcessTree.this; + } + + /** + * Immediate child processes. + */ + public final List getChildren() { + List r = new ArrayList(); + for (OSProcess p : ProcessTree.this) + if (p.getParent() == this) + r.add(p); + return r; + } + + /** + * Kills this process. + */ + public abstract void kill() throws InterruptedException; + + /** + * Kills this process and all the descendants. + *

+ * Note that the notion of "descendants" is somewhat vague + * in the presence of things like daemons. On platforms + * where the recursive operation is not supported, this just kills + * the current process. + */ + public abstract void killRecursively() throws InterruptedException; + + /** + * Gets the command-line arguments of this process. + * + *

+ * On Windows, where the OS models command-line arguments as a single string, this method + * computes the approximated tokenization. + */ + public abstract List getArguments(); + + /** + * Obtains the environment variables of this process. + * + * @return + * empty map if failed (for example because the process is already dead, + * or the permission was denied.) + */ + public abstract EnvVars getEnvironmentVariables(); + + /** + * Given the environment variable of a process and the "model environment variable" that Hudson + * used for launching the build, returns true if there's a match (which means the process should + * be considered a descendant of a build.) + */ + public final boolean hasMatchingEnvVars(Map modelEnvVar) { + if (modelEnvVar.isEmpty()) + // sanity check so that we don't start rampage. + return false; + + SortedMap envs = getEnvironmentVariables(); + for (Entry e : modelEnvVar.entrySet()) { + String v = envs.get(e.getKey()); + if (v == null || !v.equals(e.getValue())) + return false; // no match + } + + return true; + } + + // /** + // * Executes a chunk of code at the same machine where this process resides. + // */ + // public T act(ProcessCallable callable) throws IOException, InterruptedException { + // return callable.invoke(this, FilePath.localChannel); + // } + + Object writeReplace() { + return new SerializedProcess(pid); + } + } + + /** + * Serialized form of {@link OSProcess} is the PID and {@link ProcessTree} + */ + private final class SerializedProcess implements Serializable { + private final int pid; + + private static final long serialVersionUID = 1L; + + private SerializedProcess(int pid) { + this.pid = pid; + } + + Object readResolve() { + return get(pid); + } + } + + // /** + // * Code that gets executed on the machine where the {@link OSProcess} is local. + // * Used to act on {@link OSProcess}. + // * + // * @see OSProcess#act(ProcessCallable) + // */ + // public interface ProcessCallable extends Serializable { + // /** + // * Performs the computational task on the node where the data is located. + // * + // * @param process + // * {@link OSProcess} that represents the local process. + // * @param channel + // * The "back pointer" of the {@link Channel} that represents the communication + // * with the node from where the code was sent. + // */ + // T invoke(OSProcess process, VirtualChannel channel) throws IOException; + // } + + /** + * Gets the {@link ProcessTree} of the current system + * that JVM runs in, or in the worst case return the default one + * that's not capable of killing descendants at all. + */ + public static ProcessTree get() { + if (!enabled) + return DEFAULT; + + try { + if (File.pathSeparatorChar == ';') + return new Windows(); + + String os = fixNull(System.getProperty("os.name")); + if (os.equals("Linux")) + return new Linux(); + if (os.equals("SunOS")) + return new Solaris(); + if (os.equals("Mac OS X")) + return new Darwin(); + } catch (LinkageError e) { + LOGGER.log(Level.WARNING, "Failed to load winp. Reverting to the default", e); + enabled = false; + } + + return DEFAULT; + } + + private static String fixNull(String s) { + if (s == null) + return ""; + else + return s; + } + + // + // + // implementation follows + //------------------------------------------- + // + + /** + * Empty process list as a default value if the platform doesn't support it. 
+ */ + /* package */ static final ProcessTree DEFAULT = new Local() { + public OSProcess get(final Process proc) { + return new OSProcess(-1) { + public OSProcess getParent() { + return null; + } + + public void killRecursively() { + // fall back to a single process killer + proc.destroy(); + } + + public void kill() throws InterruptedException { + proc.destroy(); + } + + public List getArguments() { + return Collections.emptyList(); + } + + public EnvVars getEnvironmentVariables() { + return new EnvVars(); + } + }; + } + + public void killAll(Map modelEnvVars) { + // no-op + } + }; + + private static final class Windows extends Local { + Windows() { + for (final WinProcess p : WinProcess.all()) { + int pid = p.getPid(); + if (pid == 0 || pid == 4) + continue; // skip the System Idle and System processes + super.processes.put(pid, new OSProcess(pid) { + private EnvVars env; + + private List args; + + public OSProcess getParent() { + // windows process doesn't have parent/child relationship + return null; + } + + public void killRecursively() throws InterruptedException { + LOGGER.finer("Killing recursively " + getPid()); + p.killRecursively(); + } + + public void kill() throws InterruptedException { + LOGGER.finer("Killing " + getPid()); + p.kill(); + } + + @Override + public synchronized List getArguments() { + if (args == null) + args = Arrays.asList(QuotedStringTokenizer.tokenize(p.getCommandLine())); + return args; + } + + @Override + public synchronized EnvVars getEnvironmentVariables() { + if (env != null) + return env; + env = new EnvVars(); + + try { + env.putAll(p.getEnvironmentVariables()); + } catch (WinpException e) { + LOGGER.log(FINE, "Failed to get environment variable ", e); + } + return env; + } + }); + + } + } + + @Override + public OSProcess get(Process proc) { + return get(new WinProcess(proc).getPid()); + } + + public void killAll(Map modelEnvVars) throws InterruptedException { + for (OSProcess p : this) { + if (p.getPid() < 10) + continue; // ignore system processes like "idle process" + + LOGGER.finest("Considering to kill " + p.getPid()); + + boolean matched; + try { + matched = p.hasMatchingEnvVars(modelEnvVars); + } catch (WinpException e) { + // likely a missing privilege + LOGGER.log(FINEST, " Failed to check environment variable match", e); + continue; + } + + if (matched) + p.killRecursively(); + else + LOGGER.finest("Environment variable didn't match"); + + } + } + + static { + WinProcess.enableDebugPrivilege(); + } + } + + static abstract class Unix extends Local { + + @Override + public OSProcess get(Process proc) { + try { + return get((Integer) UnixReflection.pid(proc)); + } catch (IllegalAccessError e) { // impossible + IllegalAccessError x = new IllegalAccessError(); + x.initCause(e); + throw x; + } + } + + public void killAll(Map modelEnvVars) throws InterruptedException { + for (OSProcess p : this) + if (p.hasMatchingEnvVars(modelEnvVars)) + p.killRecursively(); + } + } + + /** + * {@link ProcessTree} based on /proc. 
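+ * Every numeric directory under /proc becomes one process entry; the Linux implementation, for
+ * example, reads /proc/[pid]/status (PPID), /proc/[pid]/cmdline (arguments) and
+ * /proc/[pid]/environ (environment) to fill in the details.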
+ */ + static abstract class ProcfsUnix extends Unix { + ProcfsUnix() { + File[] processes = new File("/proc").listFiles(new FileFilter() { + public boolean accept(File f) { + return f.isDirectory(); + } + }); + if (processes == null) { + LOGGER.info("No /proc"); + return; + } + + for (File p : processes) { + int pid; + try { + pid = Integer.parseInt(p.getName()); + } catch (NumberFormatException e) { + // other sub-directories + continue; + } + try { + this.processes.put(pid, createProcess(pid)); + } catch (IOException e) { + // perhaps the process status has changed since we obtained a directory listing + } + } + } + + protected abstract OSProcess createProcess(int pid) throws IOException; + } + + /** + * A process. + */ + public abstract class UnixProcess extends OSProcess { + protected UnixProcess(int pid) { + super(pid); + } + + protected final File getFile(String relativePath) { + return new File(new File("/proc/" + getPid()), relativePath); + } + + /** + * Tries to kill this process. + */ + public void kill() throws InterruptedException { + try { + int pid = getPid(); + LOGGER.fine("Killing pid=" + pid); + UnixReflection.destroy(pid); + } catch (IllegalAccessException e) { + // this is impossible + IllegalAccessError x = new IllegalAccessError(); + x.initCause(e); + throw x; + } catch (InvocationTargetException e) { + // tunnel serious errors + if (e.getTargetException() instanceof Error) + throw (Error) e.getTargetException(); + // otherwise log and let go. I need to see when this happens + LOGGER.log(Level.INFO, "Failed to terminate pid=" + getPid(), e); + } + } + + public void killRecursively() throws InterruptedException { + LOGGER.fine("Recursively killing pid=" + getPid()); + for (OSProcess p : getChildren()) + p.killRecursively(); + kill(); + } + + /** + * Obtains the argument list of this process. + * + * @return + * empty list if failed (for example because the process is already dead, + * or the permission was denied.) + */ + public abstract List getArguments(); + } + + /** + * Reflection used in the Unix support. + */ + private static final class UnixReflection { + /** + * Field to access the PID of the process. + * Required for Java 8 and older JVMs. + */ + private static final Field JAVA8_PID_FIELD; + + /** + * Field to access the PID of the process. + * Required for Java 9 and above until this is replaced by multi-release JAR. + */ + private static final Method JAVA9_PID_METHOD; + + /** + * Method to destroy a process, given pid. 
+ * + * Looking at the JavaSE source code, this is using SIGTERM (15) + */ + private static final Method JAVA8_DESTROY_PROCESS; + private static final Method JAVA_9_PROCESSHANDLE_OF; + private static final Method JAVA_9_PROCESSHANDLE_DESTROY; + + static { + try { + if (isPostJava8()) { + Class clazz = Process.class; + JAVA9_PID_METHOD = clazz.getMethod("pid"); + JAVA8_PID_FIELD = null; + Class processHandleClazz = Class.forName("java.lang.ProcessHandle"); + JAVA_9_PROCESSHANDLE_OF = processHandleClazz.getMethod("of", long.class); + JAVA_9_PROCESSHANDLE_DESTROY = processHandleClazz.getMethod("destroy"); + JAVA8_DESTROY_PROCESS = null; + } else { + Class clazz = Class.forName("java.lang.UNIXProcess"); + JAVA8_PID_FIELD = clazz.getDeclaredField("pid"); + JAVA8_PID_FIELD.setAccessible(true); + JAVA9_PID_METHOD = null; + + JAVA8_DESTROY_PROCESS = clazz.getDeclaredMethod("destroyProcess", int.class, boolean.class); + JAVA8_DESTROY_PROCESS.setAccessible(true); + JAVA_9_PROCESSHANDLE_OF = null; + JAVA_9_PROCESSHANDLE_DESTROY = null; + } + } catch (ClassNotFoundException | NoSuchFieldException | NoSuchMethodException e) { + LinkageError x = new LinkageError("Cannot initialize reflection for Unix Processes", e); + throw x; + } + } + + public static void destroy(int pid) throws IllegalAccessException, + InvocationTargetException { + if (JAVA8_DESTROY_PROCESS != null) { + JAVA8_DESTROY_PROCESS.invoke(null, pid, false); + } else { + final Optional handle = (Optional)JAVA_9_PROCESSHANDLE_OF.invoke(null, pid); + if (handle.isPresent()) { + JAVA_9_PROCESSHANDLE_DESTROY.invoke(handle.get()); + } + } + } + + public static int pid(Process proc) { + try { + if (JAVA8_PID_FIELD != null) { + return JAVA8_PID_FIELD.getInt(proc); + } else { + long pid = (long) JAVA9_PID_METHOD.invoke(proc); + if (pid > Integer.MAX_VALUE) { + throw new IllegalAccessError("PID is out of bounds: " + pid); + } + return (int) pid; + } + } catch (IllegalAccessException | InvocationTargetException e) { // impossible + IllegalAccessError x = new IllegalAccessError(); + x.initCause(e); + throw x; + } + } + + private static String getJavaVersionFromSystemProperty(){ + return System.getProperty("java.version"); + } + + private static boolean isPostJava8(){ + return !getJavaVersionFromSystemProperty().startsWith("1."); + } + + } + + static class Linux extends ProcfsUnix { + protected LinuxProcess createProcess(int pid) throws IOException { + return new LinuxProcess(pid); + } + + class LinuxProcess extends UnixProcess { + private int ppid = -1; + + private EnvVars envVars; + + private List arguments; + + LinuxProcess(int pid) throws IOException { + super(pid); + + BufferedReader r = new BufferedReader(new FileReader(getFile("status"))); + try { + String line; + while ((line = r.readLine()) != null) { + line = line.toLowerCase(Locale.ENGLISH); + if (line.startsWith("ppid:")) { + ppid = Integer.parseInt(line.substring(5).trim()); + break; + } + } + } finally { + r.close(); + } + if (ppid == -1) + throw new IOException("Failed to parse PPID from /proc/" + pid + "/status"); + } + + public OSProcess getParent() { + return get(ppid); + } + + public synchronized List getArguments() { + if (arguments != null) + return arguments; + arguments = new ArrayList(); + try { + byte[] cmdline = readFileToByteArray(getFile("cmdline")); + int pos = 0; + for (int i = 0; i < cmdline.length; i++) { + byte b = cmdline[i]; + if (b == 0) { + arguments.add(new String(cmdline, pos, i - pos)); + pos = i + 1; + } + } + } catch (IOException e) { + // failed to read. 
this can happen under normal circumstances (most notably permission denied) + // so don't report this as an error. + } + arguments = Collections.unmodifiableList(arguments); + return arguments; + } + + public synchronized EnvVars getEnvironmentVariables() { + if (envVars != null) + return envVars; + envVars = new EnvVars(); + try { + byte[] environ = readFileToByteArray(getFile("environ")); + int pos = 0; + for (int i = 0; i < environ.length; i++) { + byte b = environ[i]; + if (b == 0) { + envVars.addLine(new String(environ, pos, i - pos)); + pos = i + 1; + } + } + } catch (IOException e) { + // failed to read. this can happen under normal circumstances (most notably permission denied) + // so don't report this as an error. + } + return envVars; + } + } + + public byte[] readFileToByteArray(File file) throws IOException { + return Files.readAllBytes(file.toPath()); + } + } + + /** + * Implementation for Solaris that uses /proc. + * + * Amazingly, this single code works for both 32bit and 64bit Solaris, despite the fact + * that does a lot of pointer manipulation and what not. + */ + static class Solaris extends ProcfsUnix { + protected OSProcess createProcess(final int pid) throws IOException { + return new SolarisProcess(pid); + } + + private class SolarisProcess extends UnixProcess { + private final int ppid; + + /** + * Address of the environment vector. Even on 64bit Solaris this is still 32bit pointer. + */ + private final int envp; + + /** + * Similarly, address of the arguments vector. + */ + private final int argp; + + private final int argc; + + private EnvVars envVars; + + private List arguments; + + private SolarisProcess(int pid) throws IOException { + super(pid); + + RandomAccessFile psinfo = new RandomAccessFile(getFile("psinfo"), "r"); + try { + // see http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/sys/procfs.h + //typedef struct psinfo { + // int pr_flag; /* process flags */ + // int pr_nlwp; /* number of lwps in the process */ + // pid_t pr_pid; /* process id */ + // pid_t pr_ppid; /* process id of parent */ + // pid_t pr_pgid; /* process id of process group leader */ + // pid_t pr_sid; /* session id */ + // uid_t pr_uid; /* real user id */ + // uid_t pr_euid; /* effective user id */ + // gid_t pr_gid; /* real group id */ + // gid_t pr_egid; /* effective group id */ + // uintptr_t pr_addr; /* address of process */ + // size_t pr_size; /* size of process image in Kbytes */ + // size_t pr_rssize; /* resident set size in Kbytes */ + // dev_t pr_ttydev; /* controlling tty device (or PRNODEV) */ + // ushort_t pr_pctcpu; /* % of recent cpu time used by all lwps */ + // ushort_t pr_pctmem; /* % of system memory used by process */ + // timestruc_t pr_start; /* process start time, from the epoch */ + // timestruc_t pr_time; /* cpu time for this process */ + // timestruc_t pr_ctime; /* cpu time for reaped children */ + // char pr_fname[PRFNSZ]; /* name of exec'ed file */ + // char pr_psargs[PRARGSZ]; /* initial characters of arg list */ + // int pr_wstat; /* if zombie, the wait() status */ + // int pr_argc; /* initial argument count */ + // uintptr_t pr_argv; /* address of initial argument vector */ + // uintptr_t pr_envp; /* address of initial environment vector */ + // char pr_dmodel; /* data model of the process */ + // lwpsinfo_t pr_lwp; /* information for representative lwp */ + //} psinfo_t; + + // see http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/sys/types.h + // for the size of the various datatype. 
+ + // see http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/cmd/ptools/pargs/pargs.c + // for how to read this information + + psinfo.seek(8); + if (adjust(psinfo.readInt()) != pid) + throw new IOException("psinfo PID mismatch"); // sanity check + ppid = adjust(psinfo.readInt()); + + psinfo.seek(188); // now jump to pr_argc + argc = adjust(psinfo.readInt()); + argp = adjust(psinfo.readInt()); + envp = adjust(psinfo.readInt()); + } finally { + psinfo.close(); + } + if (ppid == -1) + throw new IOException("Failed to parse PPID from /proc/" + pid + "/status"); + + } + + public OSProcess getParent() { + return get(ppid); + } + + public synchronized List getArguments() { + if (arguments != null) + return arguments; + + arguments = new ArrayList(argc); + + try { + RandomAccessFile as = new RandomAccessFile(getFile("as"), "r"); + if (LOGGER.isLoggable(FINER)) + LOGGER.finer("Reading " + getFile("as")); + try { + for (int n = 0; n < argc; n++) { + // read a pointer to one entry + as.seek(to64(argp + n * 4)); + int p = adjust(as.readInt()); + + arguments.add(readLine(as, p, "argv[" + n + "]")); + } + } finally { + as.close(); + } + } catch (IOException e) { + // failed to read. this can happen under normal circumstances (most notably permission denied) + // so don't report this as an error. + } + + arguments = Collections.unmodifiableList(arguments); + return arguments; + } + + public synchronized EnvVars getEnvironmentVariables() { + if (envVars != null) + return envVars; + envVars = new EnvVars(); + + try { + RandomAccessFile as = new RandomAccessFile(getFile("as"), "r"); + if (LOGGER.isLoggable(FINER)) + LOGGER.finer("Reading " + getFile("as")); + try { + for (int n = 0;; n++) { + // read a pointer to one entry + as.seek(to64(envp + n * 4)); + int p = adjust(as.readInt()); + if (p == 0) + break; // completed the walk + + // now read the null-terminated string + envVars.addLine(readLine(as, p, "env[" + n + "]")); + } + } finally { + as.close(); + } + } catch (IOException e) { + // failed to read. this can happen under normal circumstances (most notably permission denied) + // so don't report this as an error. + } + + return envVars; + } + + private String readLine(RandomAccessFile as, int p, String prefix) throws IOException { + if (LOGGER.isLoggable(FINEST)) + LOGGER.finest("Reading " + prefix + " at " + p); + + as.seek(to64(p)); + ByteArrayOutputStream buf = new ByteArrayOutputStream(); + int ch, i = 0; + while ((ch = as.read()) > 0) { + if ((++i) % 100 == 0 && LOGGER.isLoggable(FINEST)) + LOGGER.finest(prefix + " is so far " + buf.toString()); + + buf.write(ch); + } + String line = buf.toString(); + if (LOGGER.isLoggable(FINEST)) + LOGGER.finest(prefix + " was " + line); + return line; + } + } + + /** + * int to long conversion with zero-padding. + */ + private static long to64(int i) { + return i & 0xFFFFFFFFL; + } + + /** + * {@link DataInputStream} reads a value in big-endian, so + * convert it to the correct value on little-endian systems. + */ + private static int adjust(int i) { + if (IS_LITTLE_ENDIAN) + return (i << 24) | ((i << 8) & 0x00FF0000) | ((i >> 8) & 0x0000FF00) | (i >>> 24); + else + return i; + } + + } + + /** + * Implementation for Mac OS X based on sysctl(3). 
+ */ + private static class Darwin extends Unix { + Darwin() { + String arch = System.getProperty("sun.arch.data.model"); + if ("64".equals(arch)) { + sizeOf_kinfo_proc = sizeOf_kinfo_proc_64; + kinfo_proc_pid_offset = kinfo_proc_pid_offset_64; + kinfo_proc_ppid_offset = kinfo_proc_ppid_offset_64; + } else { + sizeOf_kinfo_proc = sizeOf_kinfo_proc_32; + kinfo_proc_pid_offset = kinfo_proc_pid_offset_32; + kinfo_proc_ppid_offset = kinfo_proc_ppid_offset_32; + } + try { + IntByReference underscore = new IntByReference(sizeOfInt); + IntByReference size = new IntByReference(sizeOfInt); + Memory m; + int nRetry = 0; + while (true) { + // find out how much memory we need to do this + if (LIBC.sysctl(MIB_PROC_ALL, 3, NULL, size, NULL, underscore) != 0) + throw new IOException("Failed to obtain memory requirement: " + + LIBC.strerror(Native.getLastError())); + + // now try the real call + m = new Memory(size.getValue()); + if (LIBC.sysctl(MIB_PROC_ALL, 3, m, size, NULL, underscore) != 0) { + if (Native.getLastError() == ENOMEM && nRetry++ < 16) + continue; // retry + throw new IOException("Failed to call kern.proc.all: " + LIBC.strerror(Native.getLastError())); + } + break; + } + + int count = size.getValue() / sizeOf_kinfo_proc; + LOGGER.fine("Found " + count + " processes"); + + for (int base = 0; base < size.getValue(); base += sizeOf_kinfo_proc) { + int pid = m.getInt(base + kinfo_proc_pid_offset); + int ppid = m.getInt(base + kinfo_proc_ppid_offset); + // int effective_uid = m.getInt(base+304); + // byte[] comm = new byte[16]; + // m.read(base+163,comm,0,16); + + super.processes.put(pid, new DarwinProcess(pid, ppid)); + } + } catch (IOException e) { + LOGGER.log(Level.WARNING, "Failed to obtain process list", e); + } + } + + private class DarwinProcess extends UnixProcess { + private final int ppid; + + private EnvVars envVars; + + private List arguments; + + DarwinProcess(int pid, int ppid) { + super(pid); + this.ppid = ppid; + } + + public OSProcess getParent() { + return get(ppid); + } + + public synchronized EnvVars getEnvironmentVariables() { + if (envVars != null) + return envVars; + parse(); + return envVars; + } + + public List getArguments() { + if (arguments != null) + return arguments; + parse(); + return arguments; + } + + private void parse() { + try { + // allocate them first, so that the parse error wil result in empty data + // and avoid retry. + arguments = new ArrayList(); + envVars = new EnvVars(); + + IntByReference underscore = new IntByReference(); + + IntByReference argmaxRef = new IntByReference(0); + IntByReference size = new IntByReference(sizeOfInt); + + // for some reason, I was never able to get sysctlbyname work. 
+ // if(LIBC.sysctlbyname("kern.argmax", argmaxRef.getPointer(), size, NULL, _)!=0) + if (LIBC.sysctl(new int[] { CTL_KERN, KERN_ARGMAX }, 2, argmaxRef.getPointer(), size, NULL, underscore) != 0) + throw new IOException("Failed to get kernl.argmax: " + LIBC.strerror(Native.getLastError())); + + int argmax = argmaxRef.getValue(); + + class StringArrayMemory extends Memory { + private long offset = 0; + + StringArrayMemory(long l) { + super(l); + } + + int readInt() { + int r = getInt(offset); + offset += sizeOfInt; + return r; + } + + byte peek() { + return getByte(offset); + } + + String readString() { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + byte ch; + while ((ch = getByte(offset++)) != '\0') + baos.write(ch); + return baos.toString(); + } + + void skip0() { + // skip padding '\0's + while (getByte(offset) == '\0') + offset++; + } + } + StringArrayMemory m = new StringArrayMemory(argmax); + size.setValue(argmax); + if (LIBC.sysctl(new int[] { CTL_KERN, KERN_PROCARGS2, pid }, 3, m, size, NULL, underscore) != 0) + throw new IOException("Failed to obtain ken.procargs2: " + + LIBC.strerror(Native.getLastError())); + + /* + * Make a sysctl() call to get the raw argument space of the + * process. The layout is documented in start.s, which is part + * of the Csu project. In summary, it looks like: + * + * /---------------\ 0x00000000 + * : : + * : : + * |---------------| + * | argc | + * |---------------| + * | arg[0] | + * |---------------| + * : : + * : : + * |---------------| + * | arg[argc - 1] | + * |---------------| + * | 0 | + * |---------------| + * | env[0] | + * |---------------| + * : : + * : : + * |---------------| + * | env[n] | + * |---------------| + * | 0 | + * |---------------| <-- Beginning of data returned by sysctl() + * | exec_path | is here. + * |:::::::::::::::| + * | | + * | String area. | + * | | + * |---------------| <-- Top of stack. + * : : + * : : + * \---------------/ 0xffffffff + */ + + // I find the Darwin source code of the 'ps' command helpful in understanding how it does this: + // see http://www.opensource.apple.com/source/adv_cmds/adv_cmds-147/ps/print.c + int argc = m.readInt(); + String args0 = m.readString(); // exec path + m.skip0(); + try { + for (int i = 0; i < argc; i++) { + arguments.add(m.readString()); + } + } catch (IndexOutOfBoundsException e) { + throw new IllegalStateException("Failed to parse arguments: pid=" + pid + ", arg0=" + args0 + + ", arguments=" + arguments + ", nargs=" + argc + + ". Please run 'ps e " + pid + + "' and report this to https://issues.jenkins-ci.org/browse/JENKINS-9634", + e); + } + + // read env vars that follow + while (m.peek() != 0) + envVars.addLine(m.readString()); + } catch (IOException e) { + // this happens with insufficient permissions, so just ignore the problem. + } + } + } + + // local constants + private final int sizeOf_kinfo_proc; + + private static final int sizeOf_kinfo_proc_32 = 492; // on 32bit Mac OS X. + + private static final int sizeOf_kinfo_proc_64 = 648; // on 64bit Mac OS X. 
+ + private final int kinfo_proc_pid_offset; + + private static final int kinfo_proc_pid_offset_32 = 24; + + private static final int kinfo_proc_pid_offset_64 = 40; + + private final int kinfo_proc_ppid_offset; + + private static final int kinfo_proc_ppid_offset_32 = 416; + + private static final int kinfo_proc_ppid_offset_64 = 560; + + private static final int sizeOfInt = Native.getNativeSize(int.class); + + private static final int CTL_KERN = 1; + + private static final int KERN_PROC = 14; + + private static final int KERN_PROC_ALL = 0; + + private static final int ENOMEM = 12; + + private static int[] MIB_PROC_ALL = { CTL_KERN, KERN_PROC, KERN_PROC_ALL }; + + private static final int KERN_ARGMAX = 8; + + private static final int KERN_PROCARGS2 = 49; + } + + /** + * Represents a local process tree, where this JVM and the process tree run on the same system. + * (The opposite of {@link Remote}.) + */ + public static abstract class Local extends ProcessTree { + Local() { + } + } + + /* + * On MacOS X, there's no procfs + * instead you'd do it with the sysctl + * + * + * + * There's CLI but that doesn't seem to offer the access to per-process info + * + * + * + * + * On HP-UX, pstat_getcommandline get you command line, but I'm not seeing any environment + * variables. + */ + + private static final boolean IS_LITTLE_ENDIAN = "little".equals(System.getProperty("sun.cpu.endian")); + + private static final Logger LOGGER = Logger.getLogger(ProcessTree.class.getName()); + + /** + * Flag to control this feature. + * + *

+ * This feature involves some native code, so we are allowing the user to disable this + * in case there's a fatal problem. + * + *

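+ * For example (derived from the initializer below, shown purely as an illustration), starting the
+ * JVM with -Dorg.ow2.proactive.process_tree_killer.ProcessTree.disable=true turns the feature off.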
+ * This property supports two names for a compatibility reason. + */ + public static boolean enabled = !Boolean.getBoolean(ProcessTree.class.getName() + ".disable"); +} diff --git a/src/main/java/org/ow2/proactive/process_tree_killer/ProcessTreeRemoting.java b/src/main/java/org/ow2/proactive/process_tree_killer/ProcessTreeRemoting.java new file mode 100644 index 0000000..20dfdd7 --- /dev/null +++ b/src/main/java/org/ow2/proactive/process_tree_killer/ProcessTreeRemoting.java @@ -0,0 +1,58 @@ +/* + * ProActive Parallel Suite(TM): + * The Open Source library for parallel and distributed + * Workflows & Scheduling, Orchestration, Cloud Automation + * and Big Data Analysis on Enterprise Grids & Clouds. + * + * Copyright (c) 2007 - 2017 ActiveEon + * Contact: contact@activeeon.com + * + * This library is free software: you can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation: version 3 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + * If needed, contact us to obtain a release under GPL Version 2 or 3 + * or a different license than the AGPL. + */ +package org.ow2.proactive.process_tree_killer; + +import java.lang.reflect.Proxy; +import java.util.List; +import java.util.Map; + + +/** + * Remoting interfaces of {@link ProcessTree}. + * + * These classes need to be public due to the way {@link Proxy} works. + * + * @author Kohsuke Kawaguchi + */ +public class ProcessTreeRemoting { + public interface IProcessTree { + void killAll(Map modelEnvVars) throws InterruptedException; + } + + public interface IOSProcess { + int getPid(); + + IOSProcess getParent(); + + void kill() throws InterruptedException; + + void killRecursively() throws InterruptedException; + + List getArguments(); + + EnvVars getEnvironmentVariables(); + } +} diff --git a/src/main/java/org/ow2/proactive/process_tree_killer/QuotedStringTokenizer.java b/src/main/java/org/ow2/proactive/process_tree_killer/QuotedStringTokenizer.java new file mode 100644 index 0000000..5af17ad --- /dev/null +++ b/src/main/java/org/ow2/proactive/process_tree_killer/QuotedStringTokenizer.java @@ -0,0 +1,473 @@ +/* + * ProActive Parallel Suite(TM): + * The Open Source library for parallel and distributed + * Workflows & Scheduling, Orchestration, Cloud Automation + * and Big Data Analysis on Enterprise Grids & Clouds. + * + * Copyright (c) 2007 - 2017 ActiveEon + * Contact: contact@activeeon.com + * + * This library is free software: you can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation: version 3 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + * If needed, contact us to obtain a release under GPL Version 2 or 3 + * or a different license than the AGPL. 
+ */ +package org.ow2.proactive.process_tree_killer; + +import java.util.ArrayList; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.StringTokenizer; + + +/* ------------------------------------------------------------ */ + +/** StringTokenizer with Quoting support. + * + * This class is a copy of the java.util.StringTokenizer API and + * the behaviour is the same, except that single and doulbe quoted + * string values are recognized. + * Delimiters within quotes are not considered delimiters. + * Quotes can be escaped with '\'. + * + * @see StringTokenizer + * @author Greg Wilkins (gregw) + */ +public class QuotedStringTokenizer extends StringTokenizer { + private final static String __delim = " \t\n\r"; + + private String _string; + + private String _delim = __delim; + + private boolean _returnQuotes = false; + + private boolean _returnDelimiters = false; + + private StringBuilder _token; + + private boolean _hasToken = false; + + private int _i = 0; + + private int _lastStart = 0; + + private boolean _double = true; + + private boolean _single = true; + + public static String[] tokenize(String str) { + return new QuotedStringTokenizer(str).toArray(); + } + + public static String[] tokenize(String str, String delimiters) { + return new QuotedStringTokenizer(str, delimiters).toArray(); + } + + /* ------------------------------------------------------------ */ + /** + * + * @param str + * String to tokenize. + * @param delim + * List of delimiter characters as string. Can be null, to default to ' \t\n\r' + * @param returnDelimiters + * If true, {@link #nextToken()} will include the delimiters, not just tokenized + * tokens. + * @param returnQuotes + * If true, {@link #nextToken()} will include the quotation characters when they are present. + */ + public QuotedStringTokenizer(String str, String delim, boolean returnDelimiters, boolean returnQuotes) { + super(""); + _string = str; + if (delim != null) + _delim = delim; + _returnDelimiters = returnDelimiters; + _returnQuotes = returnQuotes; + + if (_delim.indexOf('\'') >= 0 || _delim.indexOf('"') >= 0) + throw new Error("Can't use quotes as delimiters: " + _delim); + + _token = new StringBuilder(_string.length() > 1024 ? 
512 : _string.length() / 2); + } + + /* ------------------------------------------------------------ */ + public QuotedStringTokenizer(String str, String delim, boolean returnDelimiters) { + this(str, delim, returnDelimiters, false); + } + + /* ------------------------------------------------------------ */ + public QuotedStringTokenizer(String str, String delim) { + this(str, delim, false, false); + } + + /* ------------------------------------------------------------ */ + public QuotedStringTokenizer(String str) { + this(str, null, false, false); + } + + public String[] toArray() { + List r = new ArrayList(); + while (hasMoreTokens()) + r.add(nextToken()); + return r.toArray(new String[r.size()]); + } + + /* ------------------------------------------------------------ */ + @Override + public boolean hasMoreTokens() { + // Already found a token + if (_hasToken) + return true; + + _lastStart = _i; + + int state = 0; + boolean escape = false; + while (_i < _string.length()) { + char c = _string.charAt(_i++); + + switch (state) { + case 0: // Start + if (_delim.indexOf(c) >= 0) { + if (_returnDelimiters) { + _token.append(c); + return _hasToken = true; + } + } else if (c == '\'' && _single) { + if (_returnQuotes) + _token.append(c); + state = 2; + } else if (c == '\"' && _double) { + if (_returnQuotes) + _token.append(c); + state = 3; + } else { + _token.append(c); + _hasToken = true; + state = 1; + } + continue; + + case 1: // Token + _hasToken = true; + if (escape) { + escape = false; + if (ESCAPABLE_CHARS.indexOf(c) < 0) + _token.append('\\'); + _token.append(c); + } else if (_delim.indexOf(c) >= 0) { + if (_returnDelimiters) + _i--; + return _hasToken; + } else if (c == '\'' && _single) { + if (_returnQuotes) + _token.append(c); + state = 2; + } else if (c == '\"' && _double) { + if (_returnQuotes) + _token.append(c); + state = 3; + } else if (c == '\\') { + escape = true; + } else + _token.append(c); + continue; + + case 2: // Single Quote + _hasToken = true; + if (escape) { + escape = false; + if (ESCAPABLE_CHARS.indexOf(c) < 0) + _token.append('\\'); + _token.append(c); + } else if (c == '\'') { + if (_returnQuotes) + _token.append(c); + state = 1; + } else if (c == '\\') { + if (_returnQuotes) + _token.append(c); + escape = true; + } else + _token.append(c); + continue; + + case 3: // Double Quote + _hasToken = true; + if (escape) { + escape = false; + if (ESCAPABLE_CHARS.indexOf(c) < 0) + _token.append('\\'); + _token.append(c); + } else if (c == '\"') { + if (_returnQuotes) + _token.append(c); + state = 1; + } else if (c == '\\') { + if (_returnQuotes) + _token.append(c); + escape = true; + } else + _token.append(c); + continue; + } + } + + return _hasToken; + } + + /* ------------------------------------------------------------ */ + @Override + public String nextToken() throws NoSuchElementException { + if (!hasMoreTokens() || _token == null) + throw new NoSuchElementException(); + String t = _token.toString(); + _token.setLength(0); + _hasToken = false; + return t; + } + + /* ------------------------------------------------------------ */ + @Override + public String nextToken(String delim) throws NoSuchElementException { + _delim = delim; + _i = _lastStart; + _token.setLength(0); + _hasToken = false; + return nextToken(); + } + + /* ------------------------------------------------------------ */ + @Override + public boolean hasMoreElements() { + return hasMoreTokens(); + } + + /* ------------------------------------------------------------ */ + @Override + public Object 
nextElement() throws NoSuchElementException { + return nextToken(); + } + + /* ------------------------------------------------------------ */ + /** Not implemented. + */ + @Override + public int countTokens() { + return -1; + } + + /* ------------------------------------------------------------ */ + /** Quote a string. + * The string is quoted only if quoting is required due to + * embeded delimiters, quote characters or the + * empty string. + * @param s The string to quote. + * @return quoted string + */ + public static String quote(String s, String delim) { + if (s == null) + return null; + if (s.length() == 0) + return "\"\""; + + for (int i = 0; i < s.length(); i++) { + char c = s.charAt(i); + if (c == '\\' || c == '"' || c == '\'' || Character.isWhitespace(c) || delim.indexOf(c) >= 0) { + StringBuffer b = new StringBuffer(s.length() + 8); + quote(b, s); + return b.toString(); + } + } + + return s; + } + + /* ------------------------------------------------------------ */ + /** Quote a string. + * The string is quoted only if quoting is required due to + * embeded delimiters, quote characters or the + * empty string. + * @param s The string to quote. + * @return quoted string + */ + public static String quote(String s) { + if (s == null) + return null; + if (s.length() == 0) + return "\"\""; + + StringBuffer b = new StringBuffer(s.length() + 8); + quote(b, s); + return b.toString(); + + } + + /* ------------------------------------------------------------ */ + /** Quote a string into a StringBuffer. + * The characters ", \, \n, \r, \t, \f and \b are escaped + * @param buf The StringBuffer + * @param s The String to quote. + */ + public static void quote(StringBuffer buf, String s) { + synchronized (buf) { + buf.append('"'); + for (int i = 0; i < s.length(); i++) { + char c = s.charAt(i); + switch (c) { + case '"': + buf.append("\\\""); + continue; + case '\\': + buf.append("\\\\"); + continue; + case '\n': + buf.append("\\n"); + continue; + case '\r': + buf.append("\\r"); + continue; + case '\t': + buf.append("\\t"); + continue; + case '\f': + buf.append("\\f"); + continue; + case '\b': + buf.append("\\b"); + continue; + + default: + buf.append(c); + continue; + } + } + buf.append('"'); + } + } + + /* ------------------------------------------------------------ */ + /** Unquote a string. + * @param s The string to unquote. 
+ * @return quoted string + */ + public static String unquote(String s) { + if (s == null) + return null; + if (s.length() < 2) + return s; + + char first = s.charAt(0); + char last = s.charAt(s.length() - 1); + if (first != last || (first != '"' && first != '\'')) + return s; + + StringBuilder b = new StringBuilder(s.length() - 2); + boolean escape = false; + for (int i = 1; i < s.length() - 1; i++) { + char c = s.charAt(i); + + if (escape) { + escape = false; + switch (c) { + case 'n': + b.append('\n'); + break; + case 'r': + b.append('\r'); + break; + case 't': + b.append('\t'); + break; + case 'f': + b.append('\f'); + break; + case 'b': + b.append('\b'); + break; + case 'u': + b.append((char) ((convertHexDigit((byte) s.charAt(i++)) << 24) + + (convertHexDigit((byte) s.charAt(i++)) << 16) + + (convertHexDigit((byte) s.charAt(i++)) << 8) + + (convertHexDigit((byte) s.charAt(i++))))); + break; + default: + b.append(c); + } + } else if (c == '\\') { + escape = true; + continue; + } else + b.append(c); + } + + return b.toString(); + } + + /* ------------------------------------------------------------ */ + /** + * @return handle double quotes if true + */ + public boolean getDouble() { + return _double; + } + + /* ------------------------------------------------------------ */ + /** + * @param d handle double quotes if true + */ + public void setDouble(boolean d) { + _double = d; + } + + /* ------------------------------------------------------------ */ + /** + * @return handle single quotes if true + */ + public boolean getSingle() { + return _single; + } + + /* ------------------------------------------------------------ */ + /** + * @param single handle single quotes if true + */ + public void setSingle(boolean single) { + _single = single; + } + + /** + * @param b An ASCII encoded character 0-9 a-f A-F + * @return The byte value of the character 0-16. + */ + public static byte convertHexDigit(byte b) { + if ((b >= '0') && (b <= '9')) + return (byte) (b - '0'); + if ((b >= 'a') && (b <= 'f')) + return (byte) (b - 'a' + 10); + if ((b >= 'A') && (b <= 'F')) + return (byte) (b - 'A' + 10); + return 0; + } + + /** + * Characters that can be escaped with \. + * + * Others, like, say, \W will be left alone instead of becoming just W. + * This is important to keep Hudson behave on Windows, which uses '\' as + * the directory separator. + */ + private static final String ESCAPABLE_CHARS = "\\\"' "; +} diff --git a/src/main/java/org/ow2/proactive/process_tree_killer/Util.java b/src/main/java/org/ow2/proactive/process_tree_killer/Util.java new file mode 100644 index 0000000..dd15d58 --- /dev/null +++ b/src/main/java/org/ow2/proactive/process_tree_killer/Util.java @@ -0,0 +1,107 @@ +/* + * ProActive Parallel Suite(TM): + * The Open Source library for parallel and distributed + * Workflows & Scheduling, Orchestration, Cloud Automation + * and Big Data Analysis on Enterprise Grids & Clouds. + * + * Copyright (c) 2007 - 2017 ActiveEon + * Contact: contact@activeeon.com + * + * This library is free software: you can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation: version 3 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. 
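A minimal usage sketch of the tokenizer defined above (the demo class name and input string are invented for illustration; the expected tokens follow from the implementation shown):

```java
import org.ow2.proactive.process_tree_killer.QuotedStringTokenizer;

public class QuotedStringTokenizerDemo {
    public static void main(String[] args) {
        // Quoted regions stay together and, by default, the quote characters are dropped.
        String[] tokens = QuotedStringTokenizer.tokenize("run \"my app\" --name 'a b'");
        for (String t : tokens) {
            System.out.println(t); // run / my app / --name / a b
        }

        // quote() wraps and escapes only when needed; unquote() reverses it.
        String quoted = QuotedStringTokenizer.quote("say \"hi\""); // -> "say \"hi\"" (with surrounding quotes)
        String plain = QuotedStringTokenizer.unquote(quoted);      // -> say "hi"
        System.out.println(quoted + " / " + plain);
    }
}
```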
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + * If needed, contact us to obtain a release under GPL Version 2 or 3 + * or a different license than the AGPL. + */ +package org.ow2.proactive.process_tree_killer; + +import java.util.Collection; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + + +public class Util { + + /** + * Pattern for capturing variables. Either $xyz, ${xyz} or ${a.b} but not $a.b, while ignoring "$$" + */ + private static final Pattern VARIABLE = Pattern.compile("\\$([A-Za-z0-9_]+|\\{[A-Za-z0-9_.]+\\}|\\$)"); + + /** + * Concatenate multiple strings by inserting a separator. + */ + public static String join(Collection strings, String separator) { + StringBuilder buf = new StringBuilder(); + boolean first = true; + for (Object s : strings) { + if (first) + first = false; + else + buf.append(separator); + buf.append(s); + } + return buf.toString(); + } + + /** + * Replaces the occurrence of '$key' by properties.get('key'). + * + *

+ * Unlike shell, undefined variables are left as-is (this behavior is the same as Ant.) + * + */ + + public static String replaceMacro(String s, Map properties) { + return replaceMacro(s, new VariableResolver.ByMap(properties)); + } + + /** + * Replaces the occurrence of '$key' by resolver.get('key'). + * + *

+ * Unlike shell, undefined variables are left as-is (this behavior is the same as Ant.) + */ + public static String replaceMacro(String s, VariableResolver resolver) { + if (s == null) { + return null; + } + + int idx = 0; + while (true) { + Matcher m = VARIABLE.matcher(s); + if (!m.find(idx)) + return s; + + String key = m.group().substring(1); + + // escape the dollar sign or get the key to resolve + String value; + if (key.charAt(0) == '$') { + value = "$"; + } else { + if (key.charAt(0) == '{') + key = key.substring(1, key.length() - 1); + value = resolver.resolve(key); + } + + if (value == null) + idx = m.end(); // skip this + else { + s = s.substring(0, m.start()) + value + s.substring(m.end()); + idx = m.start() + value.length(); + } + } + } + +} diff --git a/src/main/java/org/ow2/proactive/process_tree_killer/VariableResolver.java b/src/main/java/org/ow2/proactive/process_tree_killer/VariableResolver.java new file mode 100644 index 0000000..67601f6 --- /dev/null +++ b/src/main/java/org/ow2/proactive/process_tree_killer/VariableResolver.java @@ -0,0 +1,106 @@ +/* + * ProActive Parallel Suite(TM): + * The Open Source library for parallel and distributed + * Workflows & Scheduling, Orchestration, Cloud Automation + * and Big Data Analysis on Enterprise Grids & Clouds. + * + * Copyright (c) 2007 - 2017 ActiveEon + * Contact: contact@activeeon.com + * + * This library is free software: you can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation: version 3 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + * If needed, contact us to obtain a release under GPL Version 2 or 3 + * or a different license than the AGPL. + */ +package org.ow2.proactive.process_tree_killer; + +import java.util.Collection; +import java.util.Map; + + +/** + * Resolves variables to its value, while encapsulating + * how that resolution happens. + * + * @author Kohsuke Kawaguchi + */ +@SuppressWarnings("all") +public interface VariableResolver { + /** + * Receives a variable name and obtains the value associated with the name. + * + *

+ * This can be implemented simply on top of a {@link Map} (see {@link ByMap}), or + * this can be used like an expression evaluator. + * + * @param name + * Name of the variable to be resolved. + * Never null, never empty. The name shouldn't include the syntactic + * marker of an expression. IOW, it should be "foo" but not "${foo}". + * A part of the goal of this design is to abstract away the expression + * marker syntax. + * @return + * Object referenced by the name. + * Null if not found. + */ + V resolve(String name); + + /** + * Empty resolver that always returns null. + */ + VariableResolver NONE = new VariableResolver() { + public Object resolve(String name) { + return null; + } + }; + + /** + * {@link VariableResolver} backed by a {@link Map}. + */ + final class ByMap implements VariableResolver { + private final Map data; + + public ByMap(Map data) { + this.data = data; + } + + public V resolve(String name) { + return data.get(name); + } + } + + /** + * Union of multiple {@link VariableResolver}. + */ + final class Union implements VariableResolver { + private final VariableResolver[] resolvers; + + public Union(VariableResolver... resolvers) { + this.resolvers = resolvers.clone(); + } + + public Union(Collection> resolvers) { + this.resolvers = resolvers.toArray(new VariableResolver[resolvers.size()]); + } + + public V resolve(String name) { + for (VariableResolver r : resolvers) { + V v = r.resolve(name); + if (v != null) + return v; + } + return null; + } + } +} diff --git a/src/main/java/org/ow2/proactive/process_tree_killer/VersionNumber.java b/src/main/java/org/ow2/proactive/process_tree_killer/VersionNumber.java new file mode 100644 index 0000000..ca6f523 --- /dev/null +++ b/src/main/java/org/ow2/proactive/process_tree_killer/VersionNumber.java @@ -0,0 +1,512 @@ +/* + * ProActive Parallel Suite(TM): + * The Open Source library for parallel and distributed + * Workflows & Scheduling, Orchestration, Cloud Automation + * and Big Data Analysis on Enterprise Grids & Clouds. + * + * Copyright (c) 2007 - 2017 ActiveEon + * Contact: contact@activeeon.com + * + * This library is free software: you can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation: version 3 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + * If needed, contact us to obtain a release under GPL Version 2 or 3 + * or a different license than the AGPL. + */ +package org.ow2.proactive.process_tree_killer; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
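Util.replaceMacro and the VariableResolver types above fit together as in this sketch (class name and variable values are invented; the Map overload simply wraps VariableResolver.ByMap, and a custom resolver can stand in for the map):

```java
import java.util.HashMap;
import java.util.Map;

import org.ow2.proactive.process_tree_killer.Util;

public class ReplaceMacroDemo {
    public static void main(String[] args) {
        Map<String, String> vars = new HashMap<>();
        vars.put("HOME", "/home/alice");
        vars.put("app.name", "demo");

        // $VAR and ${a.b} are expanded, unknown variables are left as-is, and "$$" escapes a dollar sign.
        String out = Util.replaceMacro("cd $HOME && run ${app.name} $UNKNOWN costs $$5", vars);
        System.out.println(out); // cd /home/alice && run demo $UNKNOWN costs $5
    }
}
```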
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.Locale; +import java.util.Properties; +import java.util.Stack; + + +/** + * Immutable representation of a version number based on the Mercury version numbering scheme. + * + * {@link VersionNumber}s are {@link Comparable}. + * + *

+ * <h2>Special tokens</h2>
+ * <p>
+ * We allow a component to be not just a number, but also "ea", "ea1", "ea2".
+ * "ea" is treated as "ea0", and eaN &lt; M for any M &gt; 0.
+ *
+ * <p>
+ * '*' is also allowed as a component, and '*' &gt; M for any M &gt; 0.
+ *
+ * <p>
+ * 'SNAPSHOT' is also allowed as a component, and "N.SNAPSHOT" is interpreted as "N-1.*"
+ *
+ * <p>
+ * <pre>2.0.* &gt; 2.0.1 &gt; 2.0.1-SNAPSHOT &gt; 2.0.0.99 &gt; 2.0.0 &gt; 2.0.ea &gt; 2.0</pre>
+ * + * This class is re-implemented in 1.415. The class was originally introduced in 1.139 + * + * @since 1.139 + * @author Stephen Connolly (stephenc@apache.org) + * @author Kenney Westerhof (kenney@apache.org) + * @author Hervé Boutemy (hboutemy@apache.org) + */ +@SuppressWarnings("all") +public class VersionNumber implements Comparable { + private String value; + + private String canonical; + + private ListItem items; + + private interface Item { + public static final int INTEGER_ITEM = 0; + + public static final int STRING_ITEM = 1; + + public static final int LIST_ITEM = 2; + + public static final int WILDCARD_ITEM = 3; + + public int compareTo(Item item); + + public int getType(); + + public boolean isNull(); + } + + /** + * Represents a wild-card item in the version item list. + */ + private static class WildCardItem implements Item { + + public int compareTo(Item item) { + if (item == null) // 1.* ( > 1.99) > 1 + return 1; + switch (item.getType()) { + case INTEGER_ITEM: + case LIST_ITEM: + case STRING_ITEM: + return 1; + case WILDCARD_ITEM: + return 0; + default: + return 1; + } + } + + public int getType() { + return WILDCARD_ITEM; + } + + public boolean isNull() { + return false; + } + + @Override + public String toString() { + return "*"; + } + } + + /** + * Represents a numeric item in the version item list. + */ + private static class IntegerItem implements Item { + private static final BigInteger BigInteger_ZERO = new BigInteger("0"); + + private final BigInteger value; + + public static final IntegerItem ZERO = new IntegerItem(); + + private IntegerItem() { + this.value = BigInteger_ZERO; + } + + public IntegerItem(String str) { + this.value = new BigInteger(str); + } + + public int getType() { + return INTEGER_ITEM; + } + + public boolean isNull() { + return BigInteger_ZERO.equals(value); + } + + public int compareTo(Item item) { + if (item == null) { + return BigInteger_ZERO.equals(value) ? 0 : 1; // 1.0 == 1, 1.1 > 1 + } + + switch (item.getType()) { + case INTEGER_ITEM: + return value.compareTo(((IntegerItem) item).value); + + case STRING_ITEM: + return 1; // 1.1 > 1-sp + + case LIST_ITEM: + return 1; // 1.1 > 1-1 + + case WILDCARD_ITEM: + return 0; + + default: + throw new RuntimeException("invalid item: " + item.getClass()); + } + } + + public String toString() { + return value.toString(); + } + } + + /** + * Represents a string in the version item list, usually a qualifier. + */ + private static class StringItem implements Item { + private final static String[] QUALIFIERS = { "snapshot", "alpha", "beta", "milestone", "rc", "", "sp" }; + + private final static List _QUALIFIERS = Arrays.asList(QUALIFIERS); + + private final static Properties ALIASES = new Properties(); + + static { + ALIASES.put("ga", ""); + ALIASES.put("final", ""); + ALIASES.put("cr", "rc"); + ALIASES.put("ea", "rc"); + } + + /** + * A comparable for the empty-string qualifier. This one is used to determine if a given qualifier makes the + * version older than one without a qualifier, or more recent. 
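The ordering rules described in the class javadoc can be exercised through the public isNewerThan/isOlderThan methods; a short sketch with made-up version strings (demo class name invented):

```java
import org.ow2.proactive.process_tree_killer.VersionNumber;

public class VersionNumberDemo {
    public static void main(String[] args) {
        // Wildcard, SNAPSHOT and "ea" components follow the documented ordering.
        System.out.println(new VersionNumber("2.0.*").isNewerThan(new VersionNumber("2.0.1")));          // true
        System.out.println(new VersionNumber("2.0.1").isNewerThan(new VersionNumber("2.0.1-SNAPSHOT"))); // true
        System.out.println(new VersionNumber("2.0.ea").isOlderThan(new VersionNumber("2.0.0")));         // true

        // equals/hashCode work on the canonical form, so trailing zero components are ignored.
        System.out.println(new VersionNumber("1.0").equals(new VersionNumber("1")));                     // true
    }
}
```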
+ */ + private static String RELEASE_VERSION_INDEX = String.valueOf(_QUALIFIERS.indexOf("")); + + private String value; + + public StringItem(String value, boolean followedByDigit) { + if (followedByDigit && value.length() == 1) { + // a1 = alpha-1, b1 = beta-1, m1 = milestone-1 + switch (value.charAt(0)) { + case 'a': + value = "alpha"; + break; + case 'b': + value = "beta"; + break; + case 'm': + value = "milestone"; + break; + } + } + this.value = ALIASES.getProperty(value, value); + } + + public int getType() { + return STRING_ITEM; + } + + public boolean isNull() { + return (comparableQualifier(value).compareTo(RELEASE_VERSION_INDEX) == 0); + } + + /** + * Returns a comparable for a qualifier. + *

+ * This method both takes into account the ordering of known qualifiers as well as lexical ordering for unknown + * qualifiers. + *
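Concretely, a known qualifier maps to its index in the QUALIFIERS table above ("snapshot", "alpha", "beta", "milestone", "rc", "", "sp"), while an unknown qualifier maps to "&lt;size&gt;-&lt;qualifier&gt;", so plain string comparison yields the intended order. The method itself is private, so this standalone sketch re-implements the rule purely for illustration:

```java
import java.util.Arrays;
import java.util.List;

public class ComparableQualifierDemo {
    // Same table as StringItem.QUALIFIERS above.
    private static final List<String> QUALIFIERS =
            Arrays.asList("snapshot", "alpha", "beta", "milestone", "rc", "", "sp");

    static String comparableQualifier(String qualifier) {
        int i = QUALIFIERS.indexOf(qualifier);
        return i == -1 ? QUALIFIERS.size() + "-" + qualifier : String.valueOf(i);
    }

    public static void main(String[] args) {
        System.out.println(comparableQualifier("alpha")); // "1"     -> older than a release ("5")
        System.out.println(comparableQualifier(""));      // "5"     -> the release itself
        System.out.println(comparableQualifier("sp"));    // "6"     -> newer than a release
        System.out.println(comparableQualifier("zzz"));   // "7-zzz" -> unknown, ordered lexically after known ones
    }
}
```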

+ * just returning an Integer with the index here is faster, but requires a lot of if/then/else to check for -1 + * or QUALIFIERS.size and then resort to lexical ordering. Most comparisons are decided by the first character, + * so this is still fast. If more characters are needed then it requires a lexical sort anyway. + * + * @param qualifier + * @return + */ + public static String comparableQualifier(String qualifier) { + int i = _QUALIFIERS.indexOf(qualifier); + + return i == -1 ? _QUALIFIERS.size() + "-" + qualifier : String.valueOf(i); + } + + public int compareTo(Item item) { + if (item == null) { + // 1-rc < 1, 1-ga > 1 + return comparableQualifier(value).compareTo(RELEASE_VERSION_INDEX); + } + switch (item.getType()) { + case INTEGER_ITEM: + return -1; // 1.any < 1.1 ? + + case STRING_ITEM: + return comparableQualifier(value).compareTo(comparableQualifier(((StringItem) item).value)); + + case LIST_ITEM: + return -1; // 1.any < 1-1 + + case WILDCARD_ITEM: + return -1; + + default: + throw new RuntimeException("invalid item: " + item.getClass()); + } + } + + public String toString() { + return value; + } + } + + /** + * Represents a version list item. This class is used both for the global item list and for sub-lists (which start + * with '-(number)' in the version specification). + */ + private static class ListItem extends ArrayList implements Item { + public int getType() { + return LIST_ITEM; + } + + public boolean isNull() { + return (size() == 0); + } + + void normalize() { + for (ListIterator iterator = listIterator(size()); iterator.hasPrevious();) { + Item item = (Item) iterator.previous(); + if (item.isNull()) { + iterator.remove(); // remove null trailing items: 0, "", empty list + } else { + break; + } + } + } + + public int compareTo(Item item) { + if (item == null) { + if (size() == 0) { + return 0; // 1-0 = 1- (normalize) = 1 + } + Item first = (Item) get(0); + return first.compareTo(null); + } + + switch (item.getType()) { + case INTEGER_ITEM: + return -1; // 1-1 < 1.0.x + + case STRING_ITEM: + return 1; // 1-1 > 1-sp + + case LIST_ITEM: + Iterator left = iterator(); + Iterator right = ((ListItem) item).iterator(); + + while (left.hasNext() || right.hasNext()) { + Item l = left.hasNext() ? (Item) left.next() : null; + Item r = right.hasNext() ? (Item) right.next() : null; + + // if this is shorter, then invert the compare and mul with -1 + int result = l == null ? 
-1 * r.compareTo(l) : l.compareTo(r); + + if (result != 0) { + return result; + } + } + + return 0; + + case WILDCARD_ITEM: + return -1; + + default: + throw new RuntimeException("invalid item: " + item.getClass()); + } + } + + public String toString() { + StringBuilder buffer = new StringBuilder("("); + for (Iterator iter = iterator(); iter.hasNext();) { + buffer.append(iter.next()); + if (iter.hasNext()) { + buffer.append(','); + } + } + buffer.append(')'); + return buffer.toString(); + } + } + + public VersionNumber(String version) { + parseVersion(version); + } + + private void parseVersion(String version) { + this.value = version; + + items = new ListItem(); + + version = version.toLowerCase(Locale.ENGLISH); + + ListItem list = items; + + Stack stack = new Stack(); + stack.push(list); + + boolean isDigit = false; + + int startIndex = 0; + + for (int i = 0; i < version.length(); i++) { + char c = version.charAt(i); + + if (c == '.') { + if (i == startIndex) { + list.add(IntegerItem.ZERO); + } else { + list.add(parseItem(isDigit, version.substring(startIndex, i))); + } + startIndex = i + 1; + } else if (c == '-') { + if (i == startIndex) { + list.add(IntegerItem.ZERO); + } else { + list.add(parseItem(isDigit, version.substring(startIndex, i))); + } + startIndex = i + 1; + + if (isDigit) { + list.normalize(); // 1.0-* = 1-* + + if ((i + 1 < version.length()) && Character.isDigit(version.charAt(i + 1))) { + // new ListItem only if previous were digits and new char is a digit, + // ie need to differentiate only 1.1 from 1-1 + list.add(list = new ListItem()); + + stack.push(list); + } + } + } else if (c == '*') { + list.add(new WildCardItem()); + startIndex = i + 1; + } else if (Character.isDigit(c)) { + if (!isDigit && i > startIndex) { + list.add(new StringItem(version.substring(startIndex, i), true)); + startIndex = i; + } + + isDigit = true; + } else if (Character.isWhitespace(c)) { + if (i > startIndex) { + if (isDigit) { + list.add(parseItem(true, version.substring(startIndex, i))); + } else { + list.add(new StringItem(version.substring(startIndex, i), true)); + } + startIndex = i; + } + + isDigit = false; + } else { + if (isDigit && i > startIndex) { + list.add(parseItem(true, version.substring(startIndex, i))); + startIndex = i; + } + + isDigit = false; + } + } + + if (version.length() > startIndex) { + list.add(parseItem(isDigit, version.substring(startIndex))); + } + + while (!stack.isEmpty()) { + list = (ListItem) stack.pop(); + list.normalize(); + } + + canonical = items.toString(); + } + + private static Item parseItem(boolean isDigit, String buf) { + return isDigit ? 
(Item) new IntegerItem(buf) : (Item) new StringItem(buf, false); + } + + public int compareTo(VersionNumber o) { + return items.compareTo(o.items); + } + + public String toString() { + return value; + } + + public boolean equals(Object o) { + return (o instanceof VersionNumber) && canonical.equals(((VersionNumber) o).canonical); + } + + public int hashCode() { + return canonical.hashCode(); + } + + public boolean isOlderThan(VersionNumber rhs) { + return compareTo(rhs) < 0; + } + + public boolean isNewerThan(VersionNumber rhs) { + return compareTo(rhs) > 0; + } + + public int digit(int idx) { + Iterator i = items.iterator(); + Item item = (Item) i.next(); + while (idx > 0 && i.hasNext()) { + if (item instanceof IntegerItem) { + idx--; + } + i.next(); + } + return ((IntegerItem) item).value.intValue(); + } + + public static final Comparator DESCENDING = new Comparator() { + public int compare(VersionNumber o1, VersionNumber o2) { + return o2.compareTo(o1); + } + }; +} diff --git a/src/main/java/org/ow2/proactive/process_tree_killer/jna/GNUCLibrary.java b/src/main/java/org/ow2/proactive/process_tree_killer/jna/GNUCLibrary.java new file mode 100644 index 0000000..e9cff8e --- /dev/null +++ b/src/main/java/org/ow2/proactive/process_tree_killer/jna/GNUCLibrary.java @@ -0,0 +1,129 @@ +/* + * ProActive Parallel Suite(TM): + * The Open Source library for parallel and distributed + * Workflows & Scheduling, Orchestration, Cloud Automation + * and Big Data Analysis on Enterprise Grids & Clouds. + * + * Copyright (c) 2007 - 2017 ActiveEon + * Contact: contact@activeeon.com + * + * This library is free software: you can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation: version 3 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + * If needed, contact us to obtain a release under GPL Version 2 or 3 + * or a different license than the AGPL. + */ +package org.ow2.proactive.process_tree_killer.jna; + +import com.sun.jna.Library; +import com.sun.jna.Memory; +import com.sun.jna.Native; +import com.sun.jna.NativeLong; +import com.sun.jna.Pointer; +import com.sun.jna.StringArray; +import com.sun.jna.ptr.IntByReference; + + +/** + * GNU C library. + * + *

+ * Not available on all platforms (such as Linux/PPC, IBM mainframe, etc.), so the caller should recover gracefully + * in case of {@link LinkageError}. See HUDSON-4820. + * @author Kohsuke Kawaguchi + */ +public interface GNUCLibrary extends Library { + int fork(); + + int kill(int pid, int signum); + + int setsid(); + + int umask(int mask); + + int getpid(); + + int geteuid(); + + int getegid(); + + int getppid(); + + int chdir(String dir); + + int getdtablesize(); + + int execv(String path, StringArray args); + + int execvp(String file, StringArray args); + + int setenv(String name, String value, int replace); + + int unsetenv(String name); + + void perror(String msg); + + String strerror(int errno); + + int fcntl(int fd, int command); + + int fcntl(int fd, int command, int flags); + + // obtained from Linux. Needs to be checked if these values are portable. + int F_GETFD = 1; + + int F_SETFD = 2; + + int FD_CLOEXEC = 1; + + int chown(String fileName, int uid, int gid); + + int chmod(String fileName, int i); + + int dup(int old); + + int dup2(int old, int _new); + + int close(int fd); + + // see http://www.gnu.org/s/libc/manual/html_node/Renaming-Files.html + int rename(String oldname, String newname); + + // this is listed in http://developer.apple.com/DOCUMENTATION/Darwin/Reference/ManPages/man3/sysctlbyname.3.html + // but not in http://www.gnu.org/software/libc/manual/html_node/System-Parameters.html#index-sysctl-3493 + // perhaps it is only supported on BSD? + int sysctlbyname(String name, Pointer oldp, IntByReference oldlenp, Pointer newp, IntByReference newlen); + + int sysctl(int[] mib, int nameLen, Pointer oldp, IntByReference oldlenp, Pointer newp, IntByReference newlen); + + int sysctlnametomib(String name, Pointer mibp, IntByReference size); + + /** + * Creates a symlink. + * + * See http://linux.die.net/man/3/symlink + */ + int symlink(String oldname, String newname); + + /** + * Read a symlink. The name will be copied into the specified memory, and returns the number of + * bytes copied. The string is not null-terminated. + * + * @return + * if the return value equals size, the caller needs to retry with a bigger buffer. + * If -1, error. 
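Per the class note above, the libc binding may be missing or unloadable on some platforms, so callers are expected to guard against LinkageError; a minimal sketch (demo class name invented, signal 0 used only as an existence check):

```java
import org.ow2.proactive.process_tree_killer.jna.GNUCLibrary;

public class LibcDemo {
    public static void main(String[] args) {
        try {
            int pid = GNUCLibrary.LIBC.getpid();
            System.out.println("pid=" + pid);
            // kill(pid, 0) sends no signal; it only checks that the process can be signalled.
            int rc = GNUCLibrary.LIBC.kill(pid, 0);
            System.out.println("kill(pid, 0) -> " + rc);
        } catch (LinkageError e) {
            // Native binding unavailable (e.g. unsupported platform); fall back gracefully.
            System.out.println("libc binding unavailable: " + e);
        }
    }
}
```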
+ */ + int readlink(String filename, Memory buffer, NativeLong size); + + GNUCLibrary LIBC = (GNUCLibrary) Native.loadLibrary("c", GNUCLibrary.class); +} diff --git a/src/test/java/cloud/localstack/DDBEventMappingTest.java b/src/test/java/cloud/localstack/DDBEventMappingTest.java new file mode 100644 index 0000000..5200b91 --- /dev/null +++ b/src/test/java/cloud/localstack/DDBEventMappingTest.java @@ -0,0 +1,39 @@ +package cloud.localstack; + +import cloud.localstack.lambda.DDBEventParser; +import com.amazonaws.services.lambda.runtime.events.DynamodbEvent; +import com.fasterxml.jackson.databind.ObjectMapper; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.*; + +import static cloud.localstack.LambdaExecutor.readFile; +import static cloud.localstack.LambdaExecutor.get; + +public class DDBEventMappingTest { + + static String fileName = "src/test/resources/DDBEventLambda.json"; + + @Test + public void testParseDDBEvent() throws Exception { + String fileContent = readFile(fileName); + + ObjectMapper reader = new ObjectMapper(); + @SuppressWarnings("deprecation") + Map map = reader.reader(Map.class).readValue(fileContent); + + List> records = (List>) get(map, "Records"); + + DynamodbEvent ddbEvent = DDBEventParser.parse(records); + + DynamodbEvent.DynamodbStreamRecord record = ddbEvent.getRecords().iterator().next(); + + + Assert.assertTrue("The map must be empty", record.getDynamodb().getOldImage().isEmpty()); + Assert.assertEquals("The numbers must match",record.getDynamodb().getNewImage().get("number").getN(), "1" ); + Assert.assertArrayEquals("The set must match", + record.getDynamodb().getNewImage().get("numbers").getNS().toArray(), Arrays.asList("1","3","5","6").toArray()); + } +} diff --git a/src/test/java/cloud/localstack/LocalTestUtil.java b/src/test/java/cloud/localstack/LocalTestUtil.java new file mode 100644 index 0000000..c279a6f --- /dev/null +++ b/src/test/java/cloud/localstack/LocalTestUtil.java @@ -0,0 +1,85 @@ +package cloud.localstack; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.jar.JarEntry; +import java.util.jar.JarOutputStream; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +import com.amazonaws.services.lambda.runtime.events.SQSEvent; +import org.apache.commons.io.IOUtils; + +import com.amazonaws.services.kinesis.model.Record; +import com.amazonaws.services.lambda.model.FunctionCode; + +/** + * Utility methods used for the LocalStack unit and integration tests. 
+ * + * @author Waldemar Hummer + */ +public class LocalTestUtil { + + public static FunctionCode createFunctionCode(Class clazz) throws Exception { + FunctionCode code = new FunctionCode(); + ByteArrayOutputStream zipOut = new ByteArrayOutputStream(); + ByteArrayOutputStream jarOut = new ByteArrayOutputStream(); + // create zip file + ZipOutputStream zipStream = new ZipOutputStream(zipOut); + // create jar file + JarOutputStream jarStream = new JarOutputStream(jarOut); + + // write class files into jar stream + addClassToJar(clazz, jarStream); + addClassToJar(Record.class, jarStream); + addClassToJar(SQSEvent.class, jarStream); + // write MANIFEST into jar stream + JarEntry mfEntry = new JarEntry("META-INF/MANIFEST.MF"); + jarStream.putNextEntry(mfEntry); + jarStream.closeEntry(); + jarStream.close(); + + // write jar into zip stream + ZipEntry codeEntry = new ZipEntry("LambdaCode.jar"); + zipStream.putNextEntry(codeEntry); + zipStream.write(jarOut.toByteArray()); + zipStream.closeEntry(); + + zipStream.close(); + code.setZipFile(ByteBuffer.wrap(zipOut.toByteArray())); + + return code; + } + + private static void addClassToJar(Class clazz, JarOutputStream jarStream) throws IOException { + String resource = clazz.getName().replace(".", File.separator) + ".class"; + JarEntry jarEntry = new JarEntry(resource); + jarStream.putNextEntry(jarEntry); + IOUtils.copy(LocalTestUtil.class.getResourceAsStream("/" + resource), jarStream); + jarStream.closeEntry(); + } + + public static void retry(Runnable r) { + retry(r, 5, 1); + } + + public static void retry(Runnable r, int retries, double sleepSecs) { + for (int i = 0; i < retries; i++) { + try { + r.run(); + return; + } catch (Throwable e) { + try { + Thread.sleep((int)(sleepSecs * 1000)); + } catch (InterruptedException e1) {} + if (i >= retries - 1) { + throw new RuntimeException(e); + } + } + } + } + +} diff --git a/src/test/java/cloud/localstack/S3EventMappingTest.java b/src/test/java/cloud/localstack/S3EventMappingTest.java new file mode 100644 index 0000000..85a85ae --- /dev/null +++ b/src/test/java/cloud/localstack/S3EventMappingTest.java @@ -0,0 +1,63 @@ +package cloud.localstack; + +import cloud.localstack.lambda.S3EventParser; +import com.amazonaws.services.lambda.runtime.events.S3Event; +import com.amazonaws.services.s3.event.S3EventNotification; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.Assert; +import org.junit.Test; + +import java.util.List; +import java.util.Map; + +import static cloud.localstack.LambdaExecutor.get; +import static cloud.localstack.LambdaExecutor.readFile; + +public class S3EventMappingTest { + + static String fileName = "src/test/resources/S3EventLambda.json"; + + @Test + public void testParseS3Event() throws Exception { + String fileContent = readFile(fileName); + + ObjectMapper reader = new ObjectMapper(); + @SuppressWarnings("deprecation") + Map map = reader.reader(Map.class).readValue(fileContent); + List> records = (List>) get(map, "Records"); + + S3Event s3Event = S3EventParser.parse(records); + S3EventNotification.S3EventNotificationRecord record = s3Event.getRecords().get(0); + + // grab expected results + Map expectedResultRecord = records.get(0); + Map expS3 = (Map) expectedResultRecord.get("s3"); + Map expBk = ((Map) get(expS3, "bucket")); + Map expOi = (Map) get(expBk, "ownerIdentity"); + + // verify parsed event info + Assert.assertEquals("eventVersion match", expectedResultRecord.get("eventVersion"), record.getEventVersion()); + Assert.assertEquals("eventTime match", 
expectedResultRecord.get("eventTime"), record.getEventTime().toString()); + Assert.assertEquals("sourceIPAddress match", get((Map)expectedResultRecord.get("requestParameters"), "sourceIPAddress"), record.getRequestParameters().getSourceIPAddress()); + + Assert.assertEquals("s3 configurationId match", expS3.get("configurationId"), record.getS3().getConfigurationId()); + Assert.assertEquals("s3 object versionId match", get((Map) expS3.get("object"),"versionId"), record.getS3().getObject().getVersionId()); + Assert.assertEquals("s3 object eTag match", get((Map) expS3.get("object"),"eTag"), record.getS3().getObject().geteTag()); + Assert.assertEquals("s3 object key match", get((Map) expS3.get("object"),"key"), record.getS3().getObject().getKey()); + Assert.assertEquals("s3 object sequencer match", get((Map) expS3.get("object"),"sequencer"), record.getS3().getObject().getSequencer()); + Assert.assertEquals("s3 object size match", new Long(get((Map) expS3.get("object"),"size").toString()), record.getS3().getObject().getSizeAsLong()); + Assert.assertEquals("s3 ownerEntity principalId match", expOi.get("principalId"), record.getS3().getBucket().getOwnerIdentity().getPrincipalId()); + Assert.assertEquals("s3 bucket name match", expBk.get("name"), record.getS3().getBucket().getName() ); + Assert.assertEquals("s3 schemaVersion match", expS3.get("s3SchemaVersion"), record.getS3().getS3SchemaVersion() ); + + Assert.assertEquals("responseElements x-amz-id-2 match", get((Map) expectedResultRecord.get("responseElements"),"x-amz-id-2"), record.getResponseElements().getxAmzId2()); + Assert.assertEquals("responseElements x-amz-request-id match", get((Map) expectedResultRecord.get("responseElements"),"x-amz-request-id"), record.getResponseElements().getxAmzRequestId()); + Assert.assertEquals("awsRegion match", expectedResultRecord.get("awsRegion"), record.getAwsRegion()); + Assert.assertEquals("eventName match", expectedResultRecord.get("eventName"), record.getEventName()); + Assert.assertEquals("userIdentity principalId", get((Map) expectedResultRecord.get("userIdentity"),"principalId"), record.getUserIdentity().getPrincipalId()); + Assert.assertEquals("eventSource match", expectedResultRecord.get("eventSource"), record.getEventSource()); + + } + +} + diff --git a/src/test/java/cloud/localstack/S3FeaturesTest.java b/src/test/java/cloud/localstack/S3FeaturesTest.java new file mode 100644 index 0000000..19b36d1 --- /dev/null +++ b/src/test/java/cloud/localstack/S3FeaturesTest.java @@ -0,0 +1,208 @@ +package cloud.localstack; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.io.*; +import java.nio.charset.StandardCharsets; +import java.util.*; +import java.util.stream.*; +import java.net.*; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; + +import org.apache.commons.io.IOUtils; +import org.apache.http.*; +import org.apache.http.conn.ssl.*; +import org.apache.http.client.*; +import org.apache.http.client.methods.*; +import org.apache.http.entity.*; +import org.apache.http.impl.client.*; + +import com.amazonaws.HttpMethod; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.services.s3.*; +import com.amazonaws.services.s3.model.*; +import com.amazonaws.services.s3.model.lifecycle.*; + +import cloud.localstack.docker.annotation.LocalstackDockerProperties; + +@RunWith(LocalstackTestRunner.class) +@LocalstackDockerProperties(services = {"s3"}, ignoreDockerRunErrors=true) +public class 
S3FeaturesTest { + + /** + * Test that S3 bucket lifecycle settings can be set and read. + */ + @Test + public void testSetBucketLifecycle() throws Exception { + AmazonS3 client = TestUtils.getClientS3(); + + String bucketName = UUID.randomUUID().toString(); + client.createBucket(bucketName); + + BucketLifecycleConfiguration.Rule rule = new BucketLifecycleConfiguration.Rule() + .withId("expirationRule") + .withFilter(new LifecycleFilter(new LifecycleTagPredicate(new Tag("deleted", "true")))) + .withExpirationInDays(3) + .withStatus(BucketLifecycleConfiguration.ENABLED); + + BucketLifecycleConfiguration bucketLifecycleConfiguration = new BucketLifecycleConfiguration() + .withRules(rule); + + client.setBucketLifecycleConfiguration(bucketName, bucketLifecycleConfiguration); + + bucketLifecycleConfiguration = client.getBucketLifecycleConfiguration(bucketName); + + assertNotNull(bucketLifecycleConfiguration); + assertEquals(bucketLifecycleConfiguration.getRules().get(0).getId(), "expirationRule"); + + client.deleteBucket(bucketName); + } + + /** + * Test HTTPS connections with local S3 service + */ + @Test + public void testHttpsConnection() { + if (!Localstack.useSSL()) { + return; + } + + TestUtils.disableSslCertChecking(); + + String bucketName = "test-bucket-https"; + + AmazonS3 amazonS3Client = AmazonS3ClientBuilder.standard() + .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration( + Localstack.INSTANCE.getEndpointS3(), + Localstack.getDefaultRegion())) + .withCredentials(TestUtils.getCredentialsProvider()) + .withChunkedEncodingDisabled(true) + .withPathStyleAccessEnabled(true).build(); + InputStream is = new ByteArrayInputStream("test file content".getBytes()); + amazonS3Client.createBucket(bucketName); + PutObjectRequest putObjectRequest = new PutObjectRequest( + bucketName, "key1", is, new ObjectMetadata()). 
+ withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams("kmsKeyId")); + PutObjectResult result = amazonS3Client.putObject(putObjectRequest); + Assert.assertNotNull(result); + Assert.assertNotNull(result.getMetadata().getContentType()); + Assert.assertNotNull(result.getMetadata().getETag()); + } + + /** + * Test storing and retrieving of S3 object metadata + */ + @Test + public void testMetadata() { + AmazonS3 s3 = TestUtils.getClientS3(); + + String bucketName = UUID.randomUUID().toString(); + s3.createBucket(bucketName); + + Map originalMetadata = new HashMap(); + originalMetadata.put("key1", "val1"); + originalMetadata.put("key_2", "val2"); + originalMetadata.put("__key3", "val3"); + + ObjectMetadata objectMetadata = new ObjectMetadata(); + objectMetadata.setUserMetadata(originalMetadata); + + InputStream is = new ByteArrayInputStream("test-string".getBytes(StandardCharsets.UTF_8)); + s3.putObject(new PutObjectRequest(bucketName, "my-key1", is, objectMetadata)); + + S3Object getObj = s3.getObject(new GetObjectRequest(bucketName, "my-key1")); + ObjectMetadata objectMetadataResponse = getObj.getObjectMetadata(); + + Map receivedMetadata = objectMetadataResponse.getUserMetadata(); + + Assert.assertEquals(originalMetadata, receivedMetadata); + } + + @Test + public void testListNextBatchOfObjects() { + AmazonS3 s3Client = TestUtils.getClientS3(); + String s3BucketName = UUID.randomUUID().toString(); + s3Client.createBucket(s3BucketName); + s3Client.putObject(s3BucketName, "key1", "content"); + s3Client.putObject(s3BucketName, "key2", "content"); + s3Client.putObject(s3BucketName, "key3", "content"); + + ListObjectsRequest listObjectsRequest = new ListObjectsRequest() + .withBucketName(s3BucketName) + .withPrefix("") + .withDelimiter("/") + .withMaxKeys(1); // 1 Key per request + + ObjectListing objectListing = s3Client.listObjects(listObjectsRequest); + List someObjList = new LinkedList<>(); + someObjList.addAll(mapFilesToSomeObject(objectListing)); // puts at least 1 item into the list + + while (objectListing.isTruncated()) { + objectListing = s3Client.listNextBatchOfObjects(objectListing); + someObjList.addAll(mapFilesToSomeObject(objectListing)); + } + assertEquals(3, someObjList.size()); + } + + @Test + public void testUploadEmptyBody() { + AmazonS3 s3client = TestUtils.getClientS3(); + String bucketName = UUID.randomUUID().toString(); + String keyName = "test-key-empty"; + s3client.createBucket(bucketName); + InputStream stream = new ByteArrayInputStream("".getBytes(StandardCharsets.UTF_8)); + PutObjectRequest request = new PutObjectRequest(bucketName, keyName, stream, new ObjectMetadata()); + PutObjectResult result = s3client.putObject(request); + Assert.assertEquals(result.getMetadata().getETag(), "d41d8cd98f00b204e9800998ecf8427e"); + } + + @Test + public void testPresignedURLUpload() throws Exception { + AmazonS3 s3client = TestUtils.getClientS3(); + Date expiration = new Date(System.currentTimeMillis() + 1000*60*5); + String bucketName = UUID.randomUUID().toString(); + String keyName = "presign-test-key"; + s3client.createBucket(bucketName); + + GeneratePresignedUrlRequest generatePresignedUrlRequest = + new GeneratePresignedUrlRequest(bucketName, keyName) + .withMethod(HttpMethod.PUT) + .withExpiration(expiration) + .withKey(keyName); + URL presignedUrl = s3client.generatePresignedUrl(generatePresignedUrlRequest); + + // upload content + String content = "test content"; + HttpPut httpPut = new HttpPut(presignedUrl.toString()); + httpPut.setEntity(new StringEntity(content)); 
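+            // The presigned URL targets the local endpoint, which typically serves a self-signed
+            // certificate when SSL is enabled, so the client below is configured to trust
+            // self-signed certificates for this test upload.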
+ SSLContextBuilder builder = new SSLContextBuilder(); + builder.loadTrustMaterial(null, new TrustSelfSignedStrategy()); + SSLConnectionSocketFactory sslsf = new SSLConnectionSocketFactory(builder.build()); + CloseableHttpClient httpclient = HttpClients.custom().setSSLSocketFactory(sslsf).build(); + + httpclient.execute(httpPut); + httpclient.close(); + + // download content + GetObjectRequest req = new GetObjectRequest(bucketName, keyName); + S3Object stream = s3client.getObject(req); + String result = IOUtils.toString(stream.getObjectContent()); + Assert.assertEquals(result, content); + } + + // ---------------- + // UTILITY METHODS + // ---------------- + + private List mapFilesToSomeObject(ObjectListing objectListing) { + return objectListing.getObjectSummaries() + .stream() + .map(S3ObjectSummary::getKey) + .collect(Collectors.toList()); + } + +} diff --git a/src/test/java/cloud/localstack/S3UploadTest.java b/src/test/java/cloud/localstack/S3UploadTest.java new file mode 100644 index 0000000..61df771 --- /dev/null +++ b/src/test/java/cloud/localstack/S3UploadTest.java @@ -0,0 +1,110 @@ +package cloud.localstack; + +import static org.junit.Assert.assertEquals; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.UUID; + +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.io.IOUtils; +import org.apache.http.entity.ContentType; +import org.junit.Test; +import org.junit.runner.RunWith; + +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.S3Object; + +/** + * Test S3 uploads to LocalStack + */ +@RunWith(LocalstackTestRunner.class) +public class S3UploadTest { + + /** + * Test based on https://github.com/localstack/localstack/issues/359 + */ + @Test + public void testTrival() throws Exception { + testUpload("{}"); // Some JSON content, just an example + } + + /** + * Tests greater than 128k uploads + * @throws Exception + */ + @Test + public void testGreaterThan128k() throws Exception { + testUpload(String.join("", Collections.nCopies(13108, "abcdefghij"))); // Just slightly more than 2^17 bytes + } + + /** + * Tests less than 128k uploads + * @throws Exception + */ + @Test + public void testLessThan128k() throws Exception { + testUpload(String.join("", Collections.nCopies(13107, "abcdefghij"))); // Just slightly less than 2^17 bytes + } + + /** + * Tests upload of empty file. This is an operation that hadoop's S3AFilesystem executes to create "directories" + * in S3. + * + * This test is currently failing because the S3 server doesn't properly calculate the MD5 of null string. 
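For reference, the digest expected for a zero-byte body is simply the MD5 of no input; a quick check with the commons-codec utilities these tests already use (demo class name invented):

```java
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.codec.digest.DigestUtils;

public class EmptyBodyMd5Demo {
    public static void main(String[] args) {
        byte[] empty = new byte[0];
        // Hex form: the ETag S3 reports for a zero-byte, non-multipart object.
        System.out.println(DigestUtils.md5Hex(empty));                          // d41d8cd98f00b204e9800998ecf8427e
        // Base64 form: the value that would go into a Content-MD5 header.
        System.out.println(Base64.encodeBase64String(DigestUtils.md5(empty)));  // 1B2M2Y8AsgTpgAmY7PhCfg==
    }
}
```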
+ */ + @Test + public void testZeroLengthUpload() { + AmazonS3 client = TestUtils.getClientS3(); + + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + client.createBucket(bucketName); + + final ObjectMetadata objectMetadata = new ObjectMetadata(); + final InputStream inputStream = new ByteArrayInputStream(new byte[0]); + objectMetadata.setContentLength(0L); + PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, + keyName, + inputStream, + objectMetadata); + client.putObject(putObjectRequest); + } + + private void testUpload(final String dataString) throws Exception { + AmazonS3 client = TestUtils.getClientS3(); + + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + client.createBucket(bucketName); + + byte[] dataBytes = dataString.getBytes(StandardCharsets.UTF_8); + + ObjectMetadata metaData = new ObjectMetadata(); + metaData.setContentType(ContentType.APPLICATION_JSON.toString()); + metaData.setContentEncoding(StandardCharsets.UTF_8.name()); + metaData.setContentLength(dataBytes.length); + + byte[] resultByte = DigestUtils.md5(dataBytes); + String streamMD5 = new String(Base64.encodeBase64(resultByte)); + metaData.setContentMD5(streamMD5); + + PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, keyName, + new ByteArrayInputStream(dataBytes), metaData); + client.putObject(putObjectRequest); + + S3Object object = client.getObject(bucketName, keyName); + String returnedContent = IOUtils.toString(object.getObjectContent(), "utf-8"); + assertEquals(streamMD5, object.getObjectMetadata().getContentMD5()); + assertEquals(returnedContent, dataString); + + client.deleteObject(bucketName, keyName); + client.deleteBucket(bucketName); + } + +} diff --git a/src/test/java/cloud/localstack/SNSMessagingTest.java b/src/test/java/cloud/localstack/SNSMessagingTest.java new file mode 100644 index 0000000..0a5c222 --- /dev/null +++ b/src/test/java/cloud/localstack/SNSMessagingTest.java @@ -0,0 +1,49 @@ +package cloud.localstack; + +import cloud.localstack.utils.PromiseAsyncHandler; +import com.amazonaws.services.sns.AmazonSNS; +import com.amazonaws.services.sns.AmazonSNSAsync; +import com.amazonaws.services.sns.model.CreateTopicRequest; +import com.amazonaws.services.sns.model.CreateTopicResult; +import com.amazonaws.services.sns.model.PublishRequest; +import com.amazonaws.services.sns.model.PublishResult; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; + +import javax.jms.JMSException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +/** + * Test integration of SNS messaging with LocalStack + */ +@RunWith(LocalstackTestRunner.class) +public class SNSMessagingTest { + private static final String TOPIC = "topic"; + + @Test + public void testSendMessage() throws JMSException { + final AmazonSNS clientSNS = TestUtils.getClientSNS(); + final CreateTopicResult createTopicResult = clientSNS.createTopic(TOPIC); + final PublishResult publishResult = clientSNS.publish(createTopicResult.getTopicArn(), "message"); + Assert.assertNotNull(publishResult); + } + + @Test + public void testSendMessageAsync() throws Exception { + final AmazonSNSAsync clientSNSAsync = TestUtils.getClientSNSAsync(); + final PromiseAsyncHandler createTopicPromise = new PromiseAsyncHandler<>(); + clientSNSAsync.createTopicAsync(TOPIC, createTopicPromise); + + final CompletableFuture publishResult = 
createTopicPromise.thenCompose(createTopicResult -> { + final PromiseAsyncHandler publishPromise = new PromiseAsyncHandler<>(); + clientSNSAsync.publishAsync(createTopicResult.getTopicArn(), "message", publishPromise); + return publishPromise; + }); + + final PublishResult result = publishResult.get(3, TimeUnit.SECONDS); + Assert.assertNotNull(result); + } + +} \ No newline at end of file diff --git a/src/test/java/cloud/localstack/SQSMessagingTest.java b/src/test/java/cloud/localstack/SQSMessagingTest.java new file mode 100644 index 0000000..fdeaf26 --- /dev/null +++ b/src/test/java/cloud/localstack/SQSMessagingTest.java @@ -0,0 +1,172 @@ +package cloud.localstack; + +import cloud.localstack.utils.PromiseAsyncHandler; +import cloud.localstack.docker.annotation.LocalstackDockerProperties; +import com.amazon.sqs.javamessaging.SQSConnection; +import com.amazon.sqs.javamessaging.SQSConnectionFactory; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.AmazonSQSAsync; +import com.amazonaws.services.sqs.model.*; +import com.amazonaws.services.sqs.model.Message; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import static org.junit.Assert.*; + +import javax.jms.*; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +/** + * Test integration of SQS/JMS messaging with LocalStack + * Based on: https://bitbucket.org/atlassian/localstack/issues/24/not-support-sqs-in-jms + */ +@RunWith(LocalstackTestRunner.class) +@LocalstackDockerProperties(ignoreDockerRunErrors=true) +public class SQSMessagingTest { + + private static final String JMS_QUEUE_NAME = "aws_develop_class_jms"; + private static final String SAMPLE_QUEUE_NAME = "aws_develop_class"; + private static final String SAMPLE_MULTI_BYTE_CHAR_QUEUE_NAME = "aws_develop_multi_byte"; + + @BeforeClass + public static void setup() { + Map attributeMap = new HashMap<>(); + attributeMap.put("DelaySeconds", "0"); + attributeMap.put("MaximumMessageSize", "262144"); + attributeMap.put("MessageRetentionPeriod", "1209600"); + attributeMap.put("ReceiveMessageWaitTimeSeconds", "20"); + attributeMap.put("VisibilityTimeout", "30"); + + AmazonSQS client = TestUtils.getClientSQS(); + CreateQueueRequest createQueueRequest = new CreateQueueRequest(JMS_QUEUE_NAME).withAttributes(attributeMap); + CreateQueueResult result = client.createQueue(createQueueRequest); + Assert.assertNotNull(result); + + /* Disable SSL certificate checks for local testing */ + if (Localstack.useSSL()) { + TestUtils.disableSslCertChecking(); + } + } + + @Test + public void testSendMessage() throws JMSException { + SQSConnectionFactory connectionFactory = SQSConnectionFactory.builder().withEndpoint( + Localstack.INSTANCE.getEndpointSQS()).withAWSCredentialsProvider( + new AWSStaticCredentialsProvider(TestUtils.TEST_CREDENTIALS)).build(); + SQSConnection connection = connectionFactory.createConnection(); + connection.start(); + Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); + + Queue queue = session.createQueue(JMS_QUEUE_NAME); + + // send message + MessageProducer producer = session.createProducer(queue); + TextMessage message = session.createTextMessage("This is a message!"); + producer.send(message); + Assert.assertNotNull(message.getJMSMessageID()); + + // receive message + 
MessageConsumer consumer = session.createConsumer(queue); + TextMessage received = (TextMessage) consumer.receive(); + Assert.assertNotNull(received); + } + + @Test + public void testSendMessageAsync() throws Exception { + final AmazonSQSAsync clientSQSAsync = TestUtils.getClientSQSAsync(); + + final PromiseAsyncHandler createQueuePromise = new PromiseAsyncHandler<>(); + + clientSQSAsync.createQueueAsync(SAMPLE_QUEUE_NAME, createQueuePromise); + + final CompletableFuture queueUrl = createQueuePromise.thenCompose(createQueueResult -> { + final PromiseAsyncHandler sendMessagePromise = new PromiseAsyncHandler<>(); + clientSQSAsync.sendMessageAsync(createQueueResult.getQueueUrl(), "message", sendMessagePromise); + return sendMessagePromise.thenApply(e -> createQueueResult.getQueueUrl()); + }); + + final String queue = queueUrl.get(3, TimeUnit.SECONDS); + Assert.assertNotNull(queue); + + final PromiseAsyncHandler receiveMessagePromise = new PromiseAsyncHandler<>(); + clientSQSAsync.receiveMessageAsync(queue, receiveMessagePromise); + + final CompletableFuture receivedMessage = receiveMessagePromise.thenApply(e -> e.getMessages().get(0)); + + Assert.assertEquals(receivedMessage.get(3, TimeUnit.SECONDS).getBody(), "message"); + } + + @Test + public void testAsyncMessageAttributes() { + final AmazonSQSAsync sqsAsync = TestUtils.getClientSQSAsync(); + + final CreateQueueResult myqueue = sqsAsync.createQueue("myqueue"); + + final String attrValue = "a value to see"; + final SendMessageResult sendMessageResult = sqsAsync.sendMessage( + new SendMessageRequest() + .withQueueUrl(myqueue.getQueueUrl()) + .addMessageAttributesEntry("testKey", new MessageAttributeValue() + .withStringValue(attrValue).withDataType("String")) + .withMessageBody("Simple body") + ); + + final String messageId = sendMessageResult.getMessageId(); + + final ReceiveMessageRequest request = new ReceiveMessageRequest(myqueue.getQueueUrl()). 
+ withMessageAttributeNames("All"); + final ReceiveMessageResult receiveMessageResult = sqsAsync.receiveMessage(request); + + final List messages = receiveMessageResult.getMessages(); + + final Optional messageOptional = messages.stream() + .filter(message -> messageId.equals(message.getMessageId())) + .findFirst(); + + final Message message = messageOptional.get(); + assertEquals(message.getBody(), "Simple body"); + assertEquals(message.getMessageAttributes().get("testKey").getStringValue(), attrValue); + assertEquals(message.getMessageAttributes().get("testKey").getDataType(), "String"); + } + + /** + * Test calculate md5 correct + * Based on: https://github.com/localstack/localstack/issues/1619 + */ + @Test + public void testSendMultiByteCharactersMessage() throws JMSException { + final AmazonSQS clientSQS = TestUtils.getClientSQS(); + final String queueUrl = clientSQS.createQueue(SAMPLE_MULTI_BYTE_CHAR_QUEUE_NAME).getQueueUrl(); + + /* + * send a message to the queue + */ + final String messageBody = "foo"; + final Map messageAttributes = new HashMap<>(); + messageAttributes.put("XXX", new MessageAttributeValue() + .withDataType("String") + .withStringValue("😇")); + final SendMessageRequest sendMessageRequest = new SendMessageRequest(); + sendMessageRequest.withMessageBody(messageBody); + sendMessageRequest.withQueueUrl(queueUrl); + sendMessageRequest.withMessageAttributes(messageAttributes); + final SendMessageResult sendMessageResult = clientSQS.sendMessage(sendMessageRequest); + + Assert.assertNotNull(sendMessageResult); + Assert.assertEquals("acbd18db4cc2f85cedef654fccc4a4d8", sendMessageResult.getMD5OfMessageBody()); + Assert.assertEquals("23bf3e5b587065b0cfbe95761641595a", sendMessageResult.getMD5OfMessageAttributes()); + + /* + * receive the message from the queue + */ + final ReceiveMessageResult messageResult = clientSQS.receiveMessage(queueUrl); + Assert.assertNotNull(messageResult); + } +} \ No newline at end of file diff --git a/src/test/java/cloud/localstack/deprecated/BasicFunctionalityTest.java b/src/test/java/cloud/localstack/deprecated/BasicFunctionalityTest.java new file mode 100644 index 0000000..65feb38 --- /dev/null +++ b/src/test/java/cloud/localstack/deprecated/BasicFunctionalityTest.java @@ -0,0 +1,299 @@ +package cloud.localstack.deprecated; + +import cloud.localstack.LocalTestUtil; +import cloud.localstack.sample.KinesisLambdaHandler; +import cloud.localstack.sample.S3Sample; +import cloud.localstack.sample.SQSLambdaHandler; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.Protocol; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.services.kinesis.AmazonKinesis; +import com.amazonaws.services.kinesis.model.ListStreamsResult; +import com.amazonaws.services.kinesis.model.PutRecordRequest; +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.CreateEventSourceMappingRequest; +import com.amazonaws.services.lambda.model.CreateFunctionRequest; +import com.amazonaws.services.lambda.model.ListFunctionsResult; +import com.amazonaws.services.lambda.model.Runtime; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.Bucket; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.AmazonSQSAsync; +import com.amazonaws.services.sqs.AmazonSQSAsyncClientBuilder; +import 
com.amazonaws.services.sqs.model.CreateQueueRequest; +import com.amazonaws.services.sqs.model.CreateQueueResult; +import com.amazonaws.services.sqs.model.DeleteQueueRequest; +import com.amazonaws.services.sqs.model.GetQueueAttributesRequest; +import com.amazonaws.services.sqs.model.GetQueueAttributesResult; +import com.amazonaws.services.sqs.model.ListQueuesResult; +import com.amazonaws.services.sqs.model.QueueAttributeName; +import com.amazonaws.services.sqs.model.ReceiveMessageRequest; +import com.amazonaws.services.sqs.model.ReceiveMessageResult; +import com.amazonaws.services.sqs.model.SendMessageRequest; +import com.amazonaws.services.sqs.model.SendMessageResult; +import org.assertj.core.api.Assertions; +import org.junit.Assert; +import org.junit.Ignore; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.runner.RunWith; + +import java.io.File; +import java.io.FileOutputStream; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.util.*; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +import static cloud.localstack.TestUtils.TEST_CREDENTIALS; + +/** + * Simple class to test basic functionality and interaction with LocalStack. + * + * @author Waldemar Hummer + */ +@RunWith(LocalstackOutsideDockerTestRunner.class) +@ExtendWith(LocalstackExtension.class) +@Ignore +public class BasicFunctionalityTest { + + static { + /* + * Need to disable CBOR protocol, see: + * https://github.com/mhart/kinesalite/blob/master/README.md#cbor-protocol-issues-with-the-java-sdk + */ + TestUtils.setEnv("AWS_CBOR_DISABLE", "1"); + /* Disable SSL certificate checks for local testing */ + if (Localstack.useSSL()) { + TestUtils.disableSslCertChecking(); + } + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testDevEnvironmentSetup() { + Assertions.assertThat(Localstack.isDevEnvironment()).isTrue(); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testLocalKinesisAPI() throws InterruptedException { + AmazonKinesis kinesis = TestUtils.getClientKinesis(); + ListStreamsResult streams = kinesis.listStreams(); + Assertions.assertThat(streams.getStreamNames()).isNotNull(); + String streamName = UUID.randomUUID().toString(); + kinesis.createStream(streamName, 1); + // sleep required because of kinesalite + Thread.sleep(500); + // put record to stream + PutRecordRequest req = new PutRecordRequest(); + req.setPartitionKey("foobar-key"); + req.setData(ByteBuffer.wrap("{}".getBytes())); + req.setStreamName(streamName); + kinesis.putRecord(req); + final ByteBuffer data = ByteBuffer.wrap("{\"test\":\"test\"}".getBytes()); + kinesis.putRecord(streamName, data, "partition-key"); + // clean up + kinesis.deleteStream(streamName); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testKinesisLambdaIntegration() throws Exception { + AmazonKinesis kinesis = TestUtils.getClientKinesis(); + AWSLambda lambda = TestUtils.getClientLambda(); + String functionName = UUID.randomUUID().toString(); + String streamName = UUID.randomUUID().toString(); + + // create function + CreateFunctionRequest request = new CreateFunctionRequest(); + request.setFunctionName(functionName); + request.setRuntime(Runtime.Java8); + request.setCode(LocalTestUtil.createFunctionCode(KinesisLambdaHandler.class)); + request.setHandler(KinesisLambdaHandler.class.getName()); + request.setRole("r1"); + lambda.createFunction(request); + + // create stream + kinesis.createStream(streamName, 1); + Thread.sleep(500); + String streamArn = 
kinesis.describeStream(streamName).getStreamDescription().getStreamARN(); + + // create mapping + CreateEventSourceMappingRequest mapping = new CreateEventSourceMappingRequest(); + mapping.setFunctionName(functionName); + mapping.setEventSourceArn(streamArn); + mapping.setStartingPosition("LATEST"); + lambda.createEventSourceMapping(mapping); + + // push event + kinesis.putRecord(streamName, ByteBuffer.wrap("{\"foo\": \"bar\"}".getBytes()), "partitionKey1"); + // TODO: have Lambda store the record to S3, retrieve it from there, compare result + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testSQSLambdaIntegration() throws Exception { + AmazonSQS clientSQS = TestUtils.getClientSQS(); + AWSLambda lambda = TestUtils.getClientLambda(); + AmazonS3 s3 = TestUtils.getClientS3(); + String functionName = UUID.randomUUID().toString(); + String sqsQueueName = UUID.randomUUID().toString(); + + // create function + CreateFunctionRequest request = new CreateFunctionRequest(); + request.setFunctionName(functionName); + request.setRuntime(Runtime.Java8); + request.setRole("r1"); + request.setCode(LocalTestUtil.createFunctionCode(SQSLambdaHandler.class)); + request.setHandler(SQSLambdaHandler.class.getName()); + lambda.createFunction(request); + + // create stream + CreateQueueResult queue = clientSQS.createQueue(sqsQueueName); + Thread.sleep(500); + GetQueueAttributesResult queueAttributes = clientSQS.getQueueAttributes(new GetQueueAttributesRequest() + .withQueueUrl(queue.getQueueUrl()) + .withAttributeNames(QueueAttributeName.QueueArn)); + String queueArn = queueAttributes.getAttributes().get(QueueAttributeName.QueueArn.name()); + + // create mapping + CreateEventSourceMappingRequest mapping = new CreateEventSourceMappingRequest(); + mapping.setFunctionName(functionName); + mapping.setEventSourceArn(queueArn); + lambda.createEventSourceMapping(mapping); + + // create a s3 bucket + String testBucket = UUID.randomUUID().toString(); + s3.createBucket(testBucket); + + // push event + clientSQS.sendMessage(queue.getQueueUrl(), testBucket); + + Runnable check = new Runnable() { + public void run() { + // Assert that file has been written by Lambda + ObjectListing objectListing = s3.listObjects(testBucket); + Assertions.assertThat(objectListing.getObjectSummaries()).hasSize(1); + String key = objectListing.getObjectSummaries().get(0).getKey(); + Assertions.assertThat(key).startsWith(SQSLambdaHandler.fileName[0]); + Assertions.assertThat(key).endsWith(SQSLambdaHandler.fileName[1]); + String message = s3.getObjectAsString(testBucket, key); + Assertions.assertThat(message).isEqualTo(SQSLambdaHandler.DID_YOU_GET_THE_MESSAGE); + } + }; + + LocalTestUtil.retry(check, 5, 1); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testSQSQueueAttributes() { + // Based on https://github.com/localstack/localstack/issues/1551 + + AwsClientBuilder.EndpointConfiguration endpoint = TestUtils.getEndpointConfigurationSQS(); + + ClientConfiguration cc = new ClientConfiguration(); + cc.setProtocol(Protocol.HTTP); + + AmazonSQSAsync sqsAsync = AmazonSQSAsyncClientBuilder.standard() + .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials("foo", "foo"))) + .withEndpointConfiguration(endpoint) + .withClientConfiguration(cc) + .build(); + + CreateQueueResult result1 = sqsAsync.createQueue("1551-test"); + CreateQueueResult result2 = sqsAsync.createQueue("1551-test-dlq"); + + final String queueArn = "QueueArn"; + GetQueueAttributesResult dlqQueueAttributes = 
sqsAsync.getQueueAttributes(result2.getQueueUrl(), + Collections.singletonList(queueArn)); + dlqQueueAttributes.getAttributes().get(queueArn); + + // set queue attributes + final Map attributes = new HashMap<>(); + attributes.put("VisibilityTimeout", "60"); + attributes.put("MessageRetentionPeriod", "345600"); + attributes.put("RedrivePolicy", "{\"foo\":1}"); + final String queueUrl = result1.getQueueUrl(); + sqsAsync.setQueueAttributes(queueUrl, attributes); + + // get and assert queue attributes + Map result = sqsAsync.getQueueAttributes(queueUrl, Arrays.asList("All")).getAttributes(); + Assert.assertEquals(result.get("MessageRetentionPeriod"), "345600"); + Assert.assertEquals(result.get("VisibilityTimeout"), "60"); + Assert.assertEquals(result.get("RedrivePolicy"), "{\"foo\":1}"); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testLocalS3API() throws Exception { + AmazonS3 s3 = TestUtils.getClientS3(); + List buckets = s3.listBuckets(); + Assertions.assertThat(buckets).isNotNull(); + + // run S3 sample + String s3Endpoint = Localstack.INSTANCE.getEndpointS3(); + S3Sample.runTest(TEST_CREDENTIALS, s3Endpoint); + + // run example with ZIP file upload + String testBucket = UUID.randomUUID().toString(); + s3.createBucket(testBucket); + File file = Files.createTempFile("localstack", "s3").toFile(); + file.deleteOnExit(); + ZipOutputStream zipOutputStream = new ZipOutputStream(new FileOutputStream(file)); + zipOutputStream.putNextEntry(new ZipEntry("Some content")); + zipOutputStream.write("Some text content".getBytes()); + zipOutputStream.closeEntry(); + zipOutputStream.close(); + s3.putObject(testBucket, file.getName(), file); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testLocalLambdaAPI() { + AWSLambda lambda = TestUtils.getClientLambda(); + ListFunctionsResult functions = lambda.listFunctions(); + Assertions.assertThat(functions.getFunctions()).isNotNull(); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testLocalSQSAPI() { + AmazonSQS sqs = TestUtils.getClientSQS(); + ListQueuesResult queues = sqs.listQueues(); + Assertions.assertThat(queues.getQueueUrls()).isNotNull(); + + for (String queueName : Arrays.asList("java_test_queue", "java_test_queue.fifo")) { + // create queue + CreateQueueRequest createQueueRequest = new CreateQueueRequest(); + createQueueRequest.setQueueName(queueName); + CreateQueueResult newQueue = sqs.createQueue(createQueueRequest); + String queueUrl = newQueue.getQueueUrl(); + + // send message + SendMessageRequest send = new SendMessageRequest(queueUrl, "body"); + SendMessageResult sendResult = sqs.sendMessage(send); + Assertions.assertThat(sendResult.getMD5OfMessageBody()).isNotNull(); + + // receive message + ReceiveMessageRequest request = new ReceiveMessageRequest(queueUrl); + request.setWaitTimeSeconds(1); + request.setMaxNumberOfMessages(1); + request.setMessageAttributeNames(Arrays.asList("All")); + request.setAttributeNames(Arrays.asList("All")); + ReceiveMessageResult result = sqs.receiveMessage(request); + Assertions.assertThat(result.getMessages()).isNotNull().hasSize(1); + + // delete queue + DeleteQueueRequest deleteQueue = new DeleteQueueRequest(); + deleteQueue.setQueueUrl(queueUrl); + sqs.deleteQueue(deleteQueue); + } + } + +} diff --git a/src/test/java/cloud/localstack/docker/BasicDockerFunctionalityTest.java b/src/test/java/cloud/localstack/docker/BasicDockerFunctionalityTest.java new file mode 100644 index 0000000..9d32d80 --- /dev/null +++ 
b/src/test/java/cloud/localstack/docker/BasicDockerFunctionalityTest.java @@ -0,0 +1,207 @@ +package cloud.localstack.docker; + +import cloud.localstack.Localstack; +import cloud.localstack.LocalstackTestRunner; +import cloud.localstack.TestUtils; +import cloud.localstack.docker.annotation.LocalstackDockerProperties; +import com.amazon.sqs.javamessaging.SQSConnection; +import com.amazon.sqs.javamessaging.SQSConnectionFactory; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.services.cloudwatch.*; +import com.amazonaws.services.cloudwatch.model.*; +import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; +import com.amazonaws.services.dynamodbv2.model.AttributeDefinition; +import com.amazonaws.services.dynamodbv2.model.CreateTableRequest; +import com.amazonaws.services.dynamodbv2.model.KeySchemaElement; +import com.amazonaws.services.dynamodbv2.model.KeyType; +import com.amazonaws.services.dynamodbv2.model.ListTablesResult; +import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughput; +import com.amazonaws.services.dynamodbv2.model.ScalarAttributeType; +import com.amazonaws.services.kinesis.AmazonKinesis; +import com.amazonaws.services.kinesis.model.CreateStreamRequest; +import com.amazonaws.services.kinesis.model.ListStreamsResult; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.Bucket; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.S3Object; +import com.amazonaws.services.secretsmanager.AWSSecretsManager; +import com.amazonaws.services.secretsmanager.model.CreateSecretRequest; +import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.model.CreateQueueRequest; +import com.amazonaws.services.sqs.model.ListQueuesResult; +import com.amazonaws.util.IOUtils; +import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.runner.RunWith; +import org.junit.Assert; + +import javax.jms.MessageConsumer; +import javax.jms.MessageProducer; +import javax.jms.Queue; +import javax.jms.Session; +import javax.jms.TextMessage; +import java.io.File; +import java.io.FileOutputStream; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +@RunWith(LocalstackTestRunner.class) +@ExtendWith(LocalstackDockerExtension.class) +@LocalstackDockerProperties(randomizePorts = true) +public class BasicDockerFunctionalityTest { + + static { + TestUtils.setEnv("AWS_CBOR_DISABLE", "1"); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testSecretsManager() throws Exception { + AWSSecretsManager secretsManager = TestUtils.getClientSecretsManager(); + + CreateSecretRequest createSecretRequest = new CreateSecretRequest(); + createSecretRequest.setName("my-secret-name"); + createSecretRequest.setSecretString("this is a secret thing"); + secretsManager.createSecret(createSecretRequest); + + GetSecretValueRequest getSecretValueRequest = new GetSecretValueRequest().withSecretId("my-secret-name"); + String result = secretsManager.getSecretValue(getSecretValueRequest).getSecretString(); + Assertions.assertThat(result).isEqualTo("this is a secret thing"); + + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testKinesis() throws Exception { + AmazonKinesis kinesis = TestUtils.getClientKinesis(); + + ListStreamsResult streamsResult = kinesis.listStreams(); + + 
Assertions.assertThat(streamsResult.getStreamNames()).isEmpty(); + + CreateStreamRequest createStreamRequest = new CreateStreamRequest() + .withStreamName("test-stream") + .withShardCount(2); + + kinesis.createStream(createStreamRequest); + + streamsResult = kinesis.listStreams(); + Assertions.assertThat(streamsResult.getStreamNames()).contains("test-stream"); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testDynamo() throws Exception { + AmazonDynamoDB dynamoDB = TestUtils.getClientDynamoDB(); + + ListTablesResult tablesResult = dynamoDB.listTables(); + Assertions.assertThat(tablesResult.getTableNames()).hasSize(0); + + CreateTableRequest createTableRequest = new CreateTableRequest() + .withTableName("test.table") + .withKeySchema(new KeySchemaElement("identifier", KeyType.HASH)) + .withAttributeDefinitions(new AttributeDefinition("identifier", ScalarAttributeType.S)) + .withProvisionedThroughput(new ProvisionedThroughput(10L, 10L)); + dynamoDB.createTable(createTableRequest); + + tablesResult = dynamoDB.listTables(); + Assertions.assertThat(tablesResult.getTableNames()).contains("test.table"); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testS3() throws Exception { + AmazonS3 client = TestUtils.getClientS3(); + + client.createBucket("test-bucket"); + List bucketList = client.listBuckets(); + + Assertions.assertThat(bucketList).hasSize(1); + + File file = File.createTempFile("localstack", "s3"); + file.deleteOnExit(); + + try (FileOutputStream stream = new FileOutputStream(file)) { + String content = "HELLO WORLD!"; + stream.write(content.getBytes()); + } + + PutObjectRequest request = new PutObjectRequest("test-bucket", "testData", file); + client.putObject(request); + + ObjectListing listing = client.listObjects("test-bucket"); + Assertions.assertThat(listing.getObjectSummaries()).hasSize(1); + + S3Object s3Object = client.getObject("test-bucket", "testData"); + String resultContent = IOUtils.toString(s3Object.getObjectContent()); + + Assertions.assertThat(resultContent).isEqualTo("HELLO WORLD!"); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testSQS() throws Exception { + AmazonSQS client = TestUtils.getClientSQS(); + + Map attributeMap = new HashMap<>(); + attributeMap.put("DelaySeconds", "0"); + attributeMap.put("MaximumMessageSize", "262144"); + attributeMap.put("MessageRetentionPeriod", "1209600"); + attributeMap.put("ReceiveMessageWaitTimeSeconds", "20"); + attributeMap.put("VisibilityTimeout", "30"); + + CreateQueueRequest createQueueRequest = new CreateQueueRequest("test-queue").withAttributes(attributeMap); + client.createQueue(createQueueRequest); + + ListQueuesResult listQueuesResult = client.listQueues(); + Assertions.assertThat(listQueuesResult.getQueueUrls()).hasSize(1); + + SQSConnection connection = createSQSConnection(); + connection.start(); + Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); + + Queue queue = session.createQueue("test-queue"); + + MessageProducer producer = session.createProducer(queue); + TextMessage message = session.createTextMessage("Hello World!"); + producer.send(message); + + MessageConsumer consumer = session.createConsumer(queue); + TextMessage received = (TextMessage) consumer.receive(); + Assertions.assertThat(received.getText()).isEqualTo("Hello World!"); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testCloudWatch() throws Exception { + AmazonCloudWatch client = TestUtils.getClientCloudWatch(); + Dimension dimension = new 
Dimension() + .withName("UNIQUE_PAGES") + .withValue("URLS"); + MetricDatum datum = new MetricDatum() + .withMetricName("PAGES_VISITED") + .withUnit(StandardUnit.None) + .withDimensions(dimension); + PutMetricDataRequest request = new PutMetricDataRequest() + .withNamespace("SITE/TRAFFIC") + .withMetricData(datum); + // assert no error gets thrown for null values + datum.setValue(null); + PutMetricDataResult response = client.putMetricData(request); + Assert.assertNotNull(response); + // assert success for double values + datum.setValue(123.4); + response = client.putMetricData(request); + Assert.assertNotNull(response); + } + + private SQSConnection createSQSConnection() throws Exception { + SQSConnectionFactory connectionFactory = SQSConnectionFactory.builder().withEndpoint( + Localstack.INSTANCE.getEndpointSQS()).withAWSCredentialsProvider( + new AWSStaticCredentialsProvider(TestUtils.TEST_CREDENTIALS)).build(); + return connectionFactory.createConnection(); + } +} diff --git a/src/test/java/cloud/localstack/docker/ContainerTest.java b/src/test/java/cloud/localstack/docker/ContainerTest.java new file mode 100644 index 0000000..efc0993 --- /dev/null +++ b/src/test/java/cloud/localstack/docker/ContainerTest.java @@ -0,0 +1,88 @@ +package cloud.localstack.docker; + +import org.junit.Test; + +import java.util.ArrayList; +import java.util.HashMap; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + +public class ContainerTest { + + private static final String EXTERNAL_HOST_NAME = "localhost"; + private static final String MY_PROPERTY = "MY_PROPERTY"; + private static final String MY_VALUE = "MyValue"; + + private boolean pullNewImage = false; + + @Test + public void createLocalstackContainer() throws Exception { + + HashMap environmentVariables = new HashMap<>(); + environmentVariables.put(MY_PROPERTY, MY_VALUE); + Container localStackContainer = Container.createLocalstackContainer( + EXTERNAL_HOST_NAME, pullNewImage, true, null, environmentVariables, null); + + try { + localStackContainer.waitForAllPorts(EXTERNAL_HOST_NAME); + + // Test that environment variables are actually loaded + + ArrayList echoDefaultEnv = buildEchoStatement(Container.LOCALSTACK_EXTERNAL_HOSTNAME); + ArrayList echoExternalEnv = buildEchoStatement(MY_PROPERTY); + assertEquals(EXTERNAL_HOST_NAME, localStackContainer.executeCommand(echoDefaultEnv)); + assertEquals(MY_VALUE, localStackContainer.executeCommand(echoExternalEnv)); + } + finally { + localStackContainer.stop(); + } + } + + private ArrayList buildEchoStatement(String valueToEcho) { + ArrayList args = new ArrayList<>(); + args.add("bash"); + args.add("-c"); + args.add(String.format("echo $%s", valueToEcho)); + return args; + } + + + @Test + public void createLocalstackContainerWithRandomPorts() throws Exception { + Container container = Container.createLocalstackContainer( + EXTERNAL_HOST_NAME, pullNewImage, true, null, null, null); + + try { + container.waitForAllPorts(EXTERNAL_HOST_NAME); + + assertNotEquals(4567, container.getExternalPortFor(4567)); + assertNotEquals(4575, container.getExternalPortFor(4575)); + assertNotEquals(4583, container.getExternalPortFor(4583)); + assertNotEquals(4584, container.getExternalPortFor(4584)); + } + finally { + container.stop(); + } + } + + + @Test + public void createLocalstackContainerWithStaticPorts() throws Exception { + Container container = Container.createLocalstackContainer( + EXTERNAL_HOST_NAME, pullNewImage, false, null, null, null); + + try { + 
container.waitForAllPorts(EXTERNAL_HOST_NAME); + + assertEquals(4567, container.getExternalPortFor(4567)); + assertEquals(4575, container.getExternalPortFor(4575)); + assertEquals(4583, container.getExternalPortFor(4583)); + assertEquals(4584, container.getExternalPortFor(4584)); + } + finally { + container.stop(); + } + } + +} diff --git a/src/test/java/cloud/localstack/docker/DockerOnlySQSFunctionalityTest.java b/src/test/java/cloud/localstack/docker/DockerOnlySQSFunctionalityTest.java new file mode 100644 index 0000000..44fb3c4 --- /dev/null +++ b/src/test/java/cloud/localstack/docker/DockerOnlySQSFunctionalityTest.java @@ -0,0 +1,104 @@ +package cloud.localstack.docker; + +import cloud.localstack.TestUtils; +import cloud.localstack.Localstack; +import cloud.localstack.docker.LocalstackDockerExtension; +import cloud.localstack.LocalstackTestRunner; +import cloud.localstack.docker.annotation.LocalstackDockerProperties; +import com.amazon.sqs.javamessaging.SQSConnection; +import com.amazon.sqs.javamessaging.SQSConnectionFactory; +import com.amazonaws.SdkClientException; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.model.CreateQueueRequest; +import com.amazonaws.services.sqs.model.ListQueuesResult; +import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.runner.RunWith; + +import javax.jms.MessageConsumer; +import javax.jms.MessageProducer; +import javax.jms.Queue; +import javax.jms.Session; +import javax.jms.TextMessage; +import java.util.HashMap; +import java.util.Map; + +@RunWith(LocalstackTestRunner.class) +@ExtendWith(LocalstackDockerExtension.class) +@LocalstackDockerProperties(randomizePorts = true, services = "sqs") +public class DockerOnlySQSFunctionalityTest { + + static { + TestUtils.setEnv("AWS_CBOR_DISABLE", "1"); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testKinesisNotRunning() { + final Throwable throwable = Assertions.catchThrowable(() -> TestUtils.getClientKinesis().listStreams()); + + Assertions.assertThat(throwable).isInstanceOf(SdkClientException.class); + } + + // Should throw SdkClientException + @org.junit.Test + @org.junit.jupiter.api.Test + public void testDynamoNotRunning() { + + final Throwable throwable = Assertions.catchThrowable(() -> TestUtils.getClientDynamoDB().listTables()); + + Assertions.assertThat(throwable).isInstanceOf(SdkClientException.class); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testS3NotRunning() { + final Throwable throwable = Assertions.catchThrowable(() -> TestUtils.getClientS3().createBucket + ("test-bucket")); + + Assertions.assertThat(throwable).isInstanceOf(SdkClientException.class); + } + + @org.junit.Test + @org.junit.jupiter.api.Test + public void testSQSRunning() throws Exception { + AmazonSQS client = TestUtils.getClientSQS(); + + Map attributeMap = new HashMap<>(); + attributeMap.put("DelaySeconds", "0"); + attributeMap.put("MaximumMessageSize", "262144"); + attributeMap.put("MessageRetentionPeriod", "1209600"); + attributeMap.put("ReceiveMessageWaitTimeSeconds", "20"); + attributeMap.put("VisibilityTimeout", "30"); + + CreateQueueRequest createQueueRequest = new CreateQueueRequest("test-queue").withAttributes(attributeMap); + client.createQueue(createQueueRequest); + + ListQueuesResult listQueuesResult = client.listQueues(); + + Assertions.assertThat(listQueuesResult.getQueueUrls()).hasSize(1); + + SQSConnection connection = 
createSQSConnection(); + connection.start(); + Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); + + Queue queue = session.createQueue("test-queue"); + + MessageProducer producer = session.createProducer(queue); + TextMessage message = session.createTextMessage("Hello World!"); + producer.send(message); + + MessageConsumer consumer = session.createConsumer(queue); + TextMessage received = (TextMessage) consumer.receive(); + + Assertions.assertThat(received.getText()).isEqualTo("Hello World!"); + } + + private SQSConnection createSQSConnection() throws Exception { + SQSConnectionFactory connectionFactory = SQSConnectionFactory.builder().withEndpoint( + Localstack.INSTANCE.getEndpointSQS()).withAWSCredentialsProvider( + new AWSStaticCredentialsProvider(TestUtils.TEST_CREDENTIALS)).build(); + return connectionFactory.createConnection(); + } +} diff --git a/src/test/java/cloud/localstack/docker/Junit5NestedTest.java b/src/test/java/cloud/localstack/docker/Junit5NestedTest.java new file mode 100644 index 0000000..6212601 --- /dev/null +++ b/src/test/java/cloud/localstack/docker/Junit5NestedTest.java @@ -0,0 +1,22 @@ +package cloud.localstack.docker; + +import cloud.localstack.docker.annotation.LocalstackDockerProperties; +import org.junit.Assert; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; + +@ExtendWith(LocalstackDockerExtension.class) +@LocalstackDockerProperties(randomizePorts = true, services = "sqs") +public class Junit5NestedTest { + + @Nested + class NestedClass { + + @Test + public void ShouldNotStartNewContainerInNestedTest() { + // This should not trigger an error by calling the beforeAll twice + Assert.assertTrue(true); + } + } +} diff --git a/src/test/java/cloud/localstack/docker/LocalstackDockerTest.java b/src/test/java/cloud/localstack/docker/LocalstackDockerTest.java new file mode 100644 index 0000000..03d6bb7 --- /dev/null +++ b/src/test/java/cloud/localstack/docker/LocalstackDockerTest.java @@ -0,0 +1,61 @@ +package cloud.localstack.docker; + +import cloud.localstack.Localstack; +import cloud.localstack.TestUtils; +import cloud.localstack.docker.annotation.LocalstackDockerConfiguration; +import com.amazonaws.SdkClientException; +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.model.SendMessageResult; +import org.junit.After; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.util.Collections; + +import static org.junit.Assert.assertNotNull; + +public class LocalstackDockerTest { + + private static final LocalstackDockerConfiguration DOCKER_CONFIG = LocalstackDockerConfiguration.builder() + .randomizePorts(true) + .build(); + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void startup() { + Localstack localstackDocker = Localstack.INSTANCE; + Localstack.INSTANCE.startup(DOCKER_CONFIG); + + AmazonSQS amazonSQS = TestUtils.getClientSQS(); + String queueUrl = amazonSQS.createQueue("test-queue").getQueueUrl(); + + SendMessageResult sendMessageResult = amazonSQS.sendMessage(queueUrl, "test-message"); + assertNotNull(sendMessageResult); + + String messageId = sendMessageResult.getMessageId(); + assertNotNull(messageId); + + thrown.expect(IllegalStateException.class); + + Localstack.INSTANCE.startup(DOCKER_CONFIG); + Localstack.INSTANCE.stop(); + } + + @Test + public void stop() { + Localstack.INSTANCE.startup(DOCKER_CONFIG); + 
Localstack.INSTANCE.stop(); + + AmazonSQS amazonSQS = TestUtils.getClientSQS(); + thrown.expect(SdkClientException.class); + amazonSQS.createQueue("test-queue").getQueueUrl(); + } + + @After + public void tearDown() { + Localstack.INSTANCE.stop(); + } +} \ No newline at end of file diff --git a/src/test/java/cloud/localstack/docker/PortBindingTest.java b/src/test/java/cloud/localstack/docker/PortBindingTest.java new file mode 100644 index 0000000..b6c5b87 --- /dev/null +++ b/src/test/java/cloud/localstack/docker/PortBindingTest.java @@ -0,0 +1,29 @@ +package cloud.localstack.docker; + +import cloud.localstack.Localstack; +import cloud.localstack.LocalstackTestRunner; +import cloud.localstack.TestUtils; +import cloud.localstack.docker.annotation.LocalstackDockerProperties; + +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.runner.RunWith; +import org.junit.Assert; +import org.junit.Test; + +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.model.CreateQueueRequest; + +@RunWith(LocalstackTestRunner.class) +@ExtendWith(LocalstackDockerExtension.class) +@LocalstackDockerProperties(randomizePorts = false, services = { "sqs:12345" }) +public class PortBindingTest { + + @Test + public void testAccessPredefinedPort() { + String endpoint = Localstack.INSTANCE.endpointForPort(12345); + AmazonSQS amazonSQS = TestUtils.getClientSQS(endpoint); + String url = amazonSQS.createQueue("test-queue").getQueueUrl(); + Assert.assertTrue(url.contains("://localhost:12345/queue/test-queue")); + } + +} diff --git a/src/test/java/cloud/localstack/sample/KinesisLambdaHandler.java b/src/test/java/cloud/localstack/sample/KinesisLambdaHandler.java new file mode 100644 index 0000000..788d484 --- /dev/null +++ b/src/test/java/cloud/localstack/sample/KinesisLambdaHandler.java @@ -0,0 +1,35 @@ +package cloud.localstack.sample; + +import java.util.Map; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import com.amazonaws.services.lambda.runtime.events.KinesisEvent; + +/** + * Test Lambda handler class triggered from a Kinesis event + */ +public class KinesisLambdaHandler implements RequestHandler { + + @Override + public Object handleRequest(Object event, Context context) { + if(event instanceof KinesisEvent) { + return handleRequest((KinesisEvent)event, context); + } + return handleRequest((Map)event, context); + } + + public Object handleRequest(Map event, Context context) { + System.err.println("Kinesis record: " + event); + return "{}"; + } + + public Object handleRequest(KinesisEvent event, Context context) { + for (KinesisEvent.KinesisEventRecord rec : event.getRecords()) { + String msg = new String(rec.getKinesis().getData().array()); + System.err.println("Kinesis record: " + msg); + } + return "{}"; + } + +} diff --git a/src/test/java/cloud/localstack/sample/LambdaHandler.java b/src/test/java/cloud/localstack/sample/LambdaHandler.java new file mode 100644 index 0000000..32232a1 --- /dev/null +++ b/src/test/java/cloud/localstack/sample/LambdaHandler.java @@ -0,0 +1,17 @@ +package cloud.localstack.sample; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; + +/** + * Test Lambda handler class + */ +public class LambdaHandler implements RequestHandler { + + @Override + public Object handleRequest(Object event, Context context) { + System.err.println(event); + return event.getClass(); + } + +} diff --git 
a/src/test/java/cloud/localstack/sample/LambdaStreamHandler.java b/src/test/java/cloud/localstack/sample/LambdaStreamHandler.java new file mode 100644 index 0000000..22e6410 --- /dev/null +++ b/src/test/java/cloud/localstack/sample/LambdaStreamHandler.java @@ -0,0 +1,25 @@ +package cloud.localstack.sample; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestStreamHandler; + +import java.io.*; +import java.util.stream.Collectors; + +/** + * Test Lambda stream handler class + */ +public class LambdaStreamHandler implements RequestStreamHandler { + + @Override + public void handleRequest(InputStream inputStream, OutputStream output, Context context) { + try { + BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream)); + String input = reader.lines().collect(Collectors.joining()); + System.err.println(input); + output.write("{}".getBytes()); + } catch (IOException e) { + e.printStackTrace(); + } + } +} diff --git a/src/test/java/cloud/localstack/sample/S3Sample.java b/src/test/java/cloud/localstack/sample/S3Sample.java new file mode 100644 index 0000000..62231f5 --- /dev/null +++ b/src/test/java/cloud/localstack/sample/S3Sample.java @@ -0,0 +1,209 @@ +package cloud.localstack.sample; +/* + * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import cloud.localstack.Localstack; +import com.amazonaws.AmazonClientException; +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.profile.ProfileCredentialsProvider; +import com.amazonaws.regions.Region; +import com.amazonaws.regions.Regions; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3Client; +import com.amazonaws.services.s3.S3ClientOptions; +import com.amazonaws.services.s3.model.Bucket; +import com.amazonaws.services.s3.model.GetObjectRequest; +import com.amazonaws.services.s3.model.ListObjectsRequest; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.S3Object; +import com.amazonaws.services.s3.model.S3ObjectSummary; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.util.UUID; + +/** + * This sample demonstrates how to make basic requests to Amazon S3 using the + * AWS SDK for Java. + * + * Based on: https://github.com/aws/aws-sdk-java/blob/master/src/samples/AmazonS3/S3Sample.java + */ +public class S3Sample { + + public static void main(String[] args) throws IOException { + /* + * The ProfileCredentialsProvider will return your [default] + * credential profile by reading from the credentials file located at + * (~/.aws/credentials). 
+ */ + AWSCredentials credentials = null; + try { + credentials = new ProfileCredentialsProvider().getCredentials(); + } catch (Exception e) { + throw new AmazonClientException( + "Cannot load the credentials from the credential profiles file. " + + "Please make sure that your credentials file is at the correct " + + "location (~/.aws/credentials), and is in valid format.", + e); + } + String s3Endpoint = Localstack.INSTANCE.getEndpointS3(); + runTest(credentials, s3Endpoint); + } + + public static void runTest(AWSCredentials credentials, String s3Endpoint) throws IOException { + + @SuppressWarnings("deprecation") + AmazonS3 s3 = new AmazonS3Client(credentials); + Region usWest2 = Region.getRegion(Regions.US_WEST_2); + s3.setRegion(usWest2); + s3.setEndpoint(s3Endpoint); + s3.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true) + .disableChunkedEncoding().build()); + + String bucketName = "my-first-s3-bucket-" + UUID.randomUUID(); + String key = "MyObjectKey"; + + /* + * Create a new S3 bucket - Amazon S3 bucket names are globally unique, + * so once a bucket name has been taken by any user, you can't create + * another bucket with that same name. + * + * You can optionally specify a location for your bucket if you want to + * keep your data closer to your applications or users. + */ + System.out.println("Creating bucket " + bucketName); + if (!s3.doesBucketExist(bucketName)) { + s3.createBucket(bucketName); + } + + /* + * List the buckets in your account + */ + System.out.println("Listing buckets"); + for (Bucket bucket : s3.listBuckets()) { + System.out.println(" - " + bucket.getName()); + } + + /* + * Upload an object to your bucket - You can easily upload a file to + * S3, or upload directly an InputStream if you know the length of + * the data in the stream. You can also specify your own metadata + * when uploading to S3, which allows you set a variety of options + * like content-type and content-encoding, plus additional metadata + * specific to your applications. + */ + System.out.println("Uploading a new object to S3 from a file"); + s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile())); + + /* + * Download an object - When you download an object, you get all of + * the object's metadata and a stream from which to read the contents. + * It's important to read the contents of the stream as quickly as + * possibly since the data is streamed directly from Amazon S3 and your + * network connection will remain open until you read all the data or + * close the input stream. + * + * GetObjectRequest also supports several other options, including + * conditional downloading of objects based on modification times, + * ETags, and selectively downloading a range of an object. + */ + System.out.println("Downloading an object"); + S3Object object = s3.getObject(new GetObjectRequest(bucketName, key)); + System.out.println("Content-Type: " + object.getObjectMetadata().getContentType()); + displayTextInputStream(object.getObjectContent()); + + /* + * List objects in your bucket by prefix - There are many options for + * listing the objects in your bucket. Keep in mind that buckets with + * many objects might truncate their results when listing their objects, + * so be sure to check if the returned object listing is truncated, and + * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve + * additional results. 
+ */ + System.out.println("Listing objects"); + ObjectListing objectListing = s3.listObjects(new ListObjectsRequest() + .withBucketName(bucketName) + .withPrefix("My")); + for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { + System.out.println(" - " + objectSummary.getKey() + " " + + "(size = " + objectSummary.getSize() + ")"); + } + + /* + * Delete an object - Unless versioning has been turned on for your bucket, + * there is no way to undelete an object, so use caution when deleting objects. + */ + System.out.println("Deleting an object"); + s3.deleteObject(bucketName, key); + + /* + * Delete a bucket - A bucket must be completely empty before it can be + * deleted, so remember to delete any objects from your buckets before + * you try to delete them. + */ + System.out.println("Deleting bucket " + bucketName); + s3.deleteBucket(bucketName); + } + + /** + * Creates a temporary file with text data to demonstrate uploading a file + * to Amazon S3 + * + * @return A newly created temporary file with text data. + * + * @throws IOException + */ + private static File createSampleFile() throws IOException { + File file = File.createTempFile("aws-java-sdk-", ".txt"); + file.deleteOnExit(); + + Writer writer = new OutputStreamWriter(new FileOutputStream(file)); + writer.write("abcdefghijklmnopqrstuvwxyz\n"); + writer.write("01234567890112345678901234\n"); + writer.write("!@#$%^&*()-=[]{};':',.<>/?\n"); + writer.write("01234567890112345678901234\n"); + writer.write("abcdefghijklmnopqrstuvwxyz\n"); + writer.close(); + + return file; + } + + /** + * Displays the contents of the specified input stream as text. + * + * @param input + * The input stream to display as text. + * + * @throws IOException + */ + private static void displayTextInputStream(InputStream input) throws IOException { + BufferedReader reader = new BufferedReader(new InputStreamReader(input)); + while (true) { + String line = reader.readLine(); + if (line == null) break; + + System.out.println(" " + line); + } + System.out.println(); + } + +} diff --git a/src/test/java/cloud/localstack/sample/SQSLambdaHandler.java b/src/test/java/cloud/localstack/sample/SQSLambdaHandler.java new file mode 100644 index 0000000..7026da5 --- /dev/null +++ b/src/test/java/cloud/localstack/sample/SQSLambdaHandler.java @@ -0,0 +1,55 @@ +package cloud.localstack.sample; + +import cloud.localstack.TestUtils; +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import com.amazonaws.services.lambda.runtime.events.SQSEvent; +import com.amazonaws.services.s3.AmazonS3; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; + +public class SQSLambdaHandler implements RequestHandler { + + public static final String[] fileName = { "sqsLambda", "test" }; + public static final String DID_YOU_GET_THE_MESSAGE = "Did you get the message?"; + protected AmazonS3 clientS3; + + public SQSLambdaHandler() { + try { + clientS3 = TestUtils.getClientS3(); + } catch (Exception e) { + // fall back to deprecated TestUtils + clientS3 = cloud.localstack.deprecated.TestUtils.getClientS3(); + } + } + + @Override + public Object handleRequest(SQSEvent event, Context context) { + for (SQSEvent.SQSMessage message : event.getRecords()) { + File file = getFile(DID_YOU_GET_THE_MESSAGE); + clientS3.putObject(message.getBody(), file.getName(), file); + } + + return "{}"; + } + + private File getFile(String message) { + 
File file = null; + try { + file = Files.createTempFile(fileName[0], fileName[1]).toFile(); + file.deleteOnExit(); + BufferedWriter bw = new BufferedWriter(new FileWriter(file)); + bw.write(message); + bw.close(); + + } catch (IOException e) { + e.printStackTrace(); + } + return file; + } + +} diff --git a/src/test/java/cloud/localstack/sample/SerializedInputLambdaHandler.java b/src/test/java/cloud/localstack/sample/SerializedInputLambdaHandler.java new file mode 100644 index 0000000..55beef1 --- /dev/null +++ b/src/test/java/cloud/localstack/sample/SerializedInputLambdaHandler.java @@ -0,0 +1,52 @@ +package cloud.localstack.sample; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; + +/** + * Test Lambda handler class + */ +public class SerializedInputLambdaHandler implements RequestHandler { + + @Override + public Object handleRequest(S3Input input, Context context) { + System.err.println(input); + input.setValidated(true); + return input; + } + + public static class S3Input { + + public S3Input() {} + + private String bucket; + + private String key; + + private boolean validated = false; + + public String getBucket() { + return bucket; + } + + public void setBucket(String bucket) { + this.bucket = bucket; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public void setValidated(boolean validated) { + this.validated = validated; + } + + public boolean isValidated() { + return validated; + } + } +} diff --git a/src/test/java/cloud/localstack/testcontainers/TestContainersSqsTest.java b/src/test/java/cloud/localstack/testcontainers/TestContainersSqsTest.java new file mode 100644 index 0000000..08f2717 --- /dev/null +++ b/src/test/java/cloud/localstack/testcontainers/TestContainersSqsTest.java @@ -0,0 +1,128 @@ +package cloud.localstack.testcontainers; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.util.List; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.LogMessageWaitStrategy; + +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.AmazonSQSClientBuilder; +import com.amazonaws.services.sqs.model.CreateQueueResult; +import com.amazonaws.services.sqs.model.Message; +import com.amazonaws.services.sqs.model.ReceiveMessageResult; +import com.amazonaws.services.sqs.model.SendMessageResult; + +/** + *
+ * This test is used to ensure that the bug of #308 is fixed.
+ *
+ * In this test the localstack docker image will be started by the testcontainers framework.
+ * SQS will then be used to send some messages.
+ *
+ * The goal of this test is to check that the random port mapping of testcontainers is working with localstack.
+ */ +public class TestContainersSqsTest { + + private static final String DOCKER_IMAGE_NAME = "localstack/localstack:latest"; + + private static final int SQS_PORT = 4576; + + private AmazonSQS amazonSQS; + + private GenericContainer genericContainer; + + @Before + public void before() { + startDockerImage(); + createSqsClient(); + } + + @After + public void after() { + genericContainer.stop(); + } + + private void createSqsClient() { + + /* + * get the randomly generated SQS port + */ + final Integer mappedPort = genericContainer.getMappedPort(SQS_PORT); + + /* + * create the SQS client + */ + final AwsClientBuilder.EndpointConfiguration endpointConfiguration = new AwsClientBuilder.EndpointConfiguration( + "http://localhost:" + mappedPort, + "us-east-1"); + + final AWSStaticCredentialsProvider awsStaticCredentialsProvider = new AWSStaticCredentialsProvider( + new BasicAWSCredentials("accesskey", "secretkey")); + + amazonSQS = AmazonSQSClientBuilder + .standard() + .withEndpointConfiguration(endpointConfiguration) + .withCredentials(awsStaticCredentialsProvider) + .build(); + } + + @Test + public void sendAndReceiveMessageTest() { + + /* + * create the queue + */ + final CreateQueueResult queue = amazonSQS.createQueue("test-queue"); + final String queueUrl = queue.getQueueUrl(); + + /* + * send a message to the queue + */ + final String messageBody = "test-message"; + final SendMessageResult sendMessageResult = amazonSQS.sendMessage(queueUrl, messageBody); + assertNotNull(sendMessageResult); + + final String messageId = sendMessageResult.getMessageId(); + assertNotNull(messageId); + + /* + * receive the message from the queue + */ + final ReceiveMessageResult messageResult = amazonSQS.receiveMessage(queueUrl); + assertNotNull(messageResult); + + /* + * compare results + */ + final List messages = messageResult.getMessages(); + assertNotNull(messages); + assertEquals(1, messages.size()); + + final Message message = messages.get(0); + assertEquals(messageId, message.getMessageId()); + assertEquals(messageBody, message.getBody()); + + } + + @SuppressWarnings("resource") + private void startDockerImage() { + + genericContainer = new GenericContainer<>(DOCKER_IMAGE_NAME) + .withExposedPorts(SQS_PORT) + .waitingFor(new LogMessageWaitStrategy().withRegEx(".*Ready\\.\n")); + + genericContainer.start(); + } +} diff --git a/src/test/java/cloud/localstack/utils/PromiseAsyncHandler.java b/src/test/java/cloud/localstack/utils/PromiseAsyncHandler.java new file mode 100644 index 0000000..b12e231 --- /dev/null +++ b/src/test/java/cloud/localstack/utils/PromiseAsyncHandler.java @@ -0,0 +1,18 @@ +package cloud.localstack.utils; + +import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.handlers.AsyncHandler; + +import java.util.concurrent.CompletableFuture; + +public class PromiseAsyncHandler extends CompletableFuture implements AsyncHandler { + @Override + public void onError(Exception exception) { + completeExceptionally(exception); + } + + @Override + public void onSuccess(T request, R r) { + complete(r); + } +} diff --git a/src/test/resources/DDBEventLambda.json b/src/test/resources/DDBEventLambda.json new file mode 100644 index 0000000..5515de0 --- /dev/null +++ b/src/test/resources/DDBEventLambda.json @@ -0,0 +1,66 @@ +{ + "Records": [ + { + "eventID": "1", + "eventVersion": "1.0", + "dynamodb": { + "Keys": { + "id": { + "S": "uniqueID" + } + }, + "OldImage": {}, + "SizeBytes": 1369, + "StreamViewType": "NEW_AND_OLD_IMAGES", + "NewImage": { + "status": { + "M": { + "subType": { + "S": 
"LAPSED" + }, + "type": { + "S": "SUBSCRIBED" + } + } + }, + "startDate": { + "S": "2012-04-23T18:25:43.511Z" + }, + "devices": { + "SS": [ + "deviceID" + ] + }, + "numbers": { + "NS": [1,3,5,6] + }, + "number": { + "N": 1 + }, + "list": { + "L": [ + { + "M": { + "product": { + "M": { + "sku": { + "S": "123456" + }, + "partner": { + "S": "partner" + } + } + } + } + } + ] + } + } + }, + "awsRegion": "us-east-1", + "eventName": "INSERT", + "eventSourceARN": "arn:aws:dynamodb:us-east-1:000000000000:table/test-table", + "eventSource": "aws:dynamodb" + } + ] +} diff --git a/src/test/resources/S3EventLambda.json b/src/test/resources/S3EventLambda.json new file mode 100644 index 0000000..a0182a4 --- /dev/null +++ b/src/test/resources/S3EventLambda.json @@ -0,0 +1,39 @@ +{ + "Records": [ + { + "eventVersion": "2.0", + "eventTime": "2018-08-23T21:41:36.511Z", + "requestParameters": { + "sourceIPAddress": "127.0.0.1" + }, + "s3": { + "configurationId": "testConfigRule", + "object": { + "versionId": "096fKKXTRTtl3on89fVO.nfljtsv6qko", + "eTag": "d41d8cd98f00b204e9800998ecf8427e", + "key": "key/file.txt", + "sequencer": "0055AED6DCD90281E5", + "size": 1024 + }, + "bucket": { + "ownerIdentity": { + "principalId": "A3NL1KOZZKExample" + }, + "name": "bucket-name", + "arn": "arn:aws:s3:::bucket-name" + }, + "s3SchemaVersion": "1.0" + }, + "responseElements": { + "x-amz-id-2": "eftixk72aD6Ap51TnqcoF8eFidJG9Z/2", + "x-amz-request-id": "8a0c0d15" + }, + "awsRegion": "us-east-1", + "eventName": "ObjectCreated:Put", + "userIdentity": { + "principalId": "AIDAJDPLRKLG7UEXAMPLE" + }, + "eventSource": "aws:s3" + } + ] +} \ No newline at end of file diff --git a/src/test/resources/SqsEvent.json b/src/test/resources/SqsEvent.json new file mode 100644 index 0000000..9bca6c5 --- /dev/null +++ b/src/test/resources/SqsEvent.json @@ -0,0 +1,40 @@ +{ + "records": [ + { + "messageId" : "MessageID_1", + "receiptHandle" : "MessageReceiptHandle", + "body" : "Message Body", + "md5OfBody" : "fce0ea8dd236ccb3ed9b37dae260836f", + "md5OfMessageAttributes" : "582c92c5c5b6ac403040a4f3ab3115c9", + "eventSourceArn": "arn:aws:sqs:us-west-2:000000000000:SQSQueue", + "eventSource": "aws:sqs", + "awsRegion": "us-west-2", + "attributes" : { + "ApproximateReceiveCount" : "2", + "SentTimestamp" : "1520621625029", + "SenderId" : "AROAIWPX5BD2BHG722MW4:sender", + "ApproximateFirstReceiveTimestamp" : "1520621634884" + }, + "messageAttributes" : { + "Attribute3" : { + "binaryValue" : "MTEwMA==", + "stringListValues" : ["abc", "123"], + "binaryListValues" : ["MA==", "MQ==", "MA=="], + "dataType" : "Binary" + }, + "Attribute2" : { + "stringValue" : "123", + "stringListValues" : [ ], + "binaryListValues" : ["MQ==", "MA=="], + "dataType" : "Number" + }, + "Attribute1" : { + "stringValue" : "AttributeValue1", + "stringListValues" : [ ], + "binaryListValues" : [ ], + "dataType" : "String" + } + } + } + ] +} diff --git a/src/test/resources/logback.xml b/src/test/resources/logback.xml new file mode 100644 index 0000000..85f1443 --- /dev/null +++ b/src/test/resources/logback.xml @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file