- Download http://www.spread.org/download/spread-src-5.0.1.tar.gz
- Generate the local makefile and build configuration by running:
./configure
- Build and install by running:
make install
/**
 * Minimal single-abstract-method function type: maps an input of type
 * {@code T} to a result of type {@code R}.
 *
 * <p>Mirrors {@link java.util.function.Function}; prefer the standard
 * type unless a local declaration is required (e.g. a coding exercise).
 */
@FunctionalInterface
interface Function<T, R> {

  /**
   * Applies this function to the given argument.
   *
   * @param t the input value
   * @return the function result
   */
  R apply(T t);
}
class Solution { | |
public static void main(String[] args) { | |
Result<Float> r = fetch("13").then(Solution::parseFloat); | |
System.out.println(r); |
❯ terraform init
Initializing modules...
Initializing the backend...
Initializing provider plugins...
- Reusing previous version of hashicorp/vault from the dependency lock file
- Reusing previous version of hashicorp/time from the dependency lock file
- Reusing previous version of hashicorp/nomad from the dependency lock file
- Using previously-installed hashicorp/time v0.7.2
~/src/github.com/github/super-linter-issue main !2 ?1 clear 1 ✘ 10:07:39
~/src/github.com/github/super-linter-issue main !2 ?1 docker run -it \ ✔ 10:07:43
> -v "${PWD}:/tmp/lint" \
> -e "RUN_LOCAL=true" \
> -e "DEFAULT_WORKSPACE=/tmp/lint" \
> -e "LINTER_RULES_PATH=/tmp/lint/custom/linters" \
> -e "ACTIONS_RUNNER_DEBUG=true" \
> github/super-linter:v3
2020-12-12 09:07:46 [DEBUG] Setting FILE_ARRAY_ANSIBLE variable...
2020-12-12 09:07:46 [DEBUG] Setting FILE_ARRAY_ARM variable...
// yours | |
@Test(expected = java.io.UnsupportedEncodingException.class) | |
public void testConvertBase64ToStringWithUnsupportedEncodingException() { | |
String data = "w4bDmMOF"; | |
TextConverter.convertBase64ToStringWithEncoding(data, "BadName");; | |
} | |
// favourite |
job "kafka" {
  datacenters = ["dc1"]
  type        = "service"
  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }
  group "k-group" {
# Declare phony targets so make never confuses them with files on disk.
.PHONY: all wait run-infrastructure kafka-create-topic minio-config-update kafka-event-consumer-run down

# Default target: bring the stack up, give it time to start, then configure it.
all: run-infrastructure wait kafka-create-topic minio-config-update

# Sleep for 20 secs so freshly started containers can warm up
# before the configuration targets hit them.
wait:
	sleep 20

## infrastructure: zookeeper kafka minio-server
run-infrastructure:
	docker-compose up -d zookeeper kafka minio-server
./configure
make install
@Test | |
public void test_data_pipeline_flow_successful() { | |
String id = UUID.randomUUID().toString(); | |
String from = UUID.randomUUID().toString(); | |
String to = UUID.randomUUID().toString(); | |
String text = UUID.randomUUID().toString(); | |
MessageJsonRepresentation messageJsonRepresentation = | |
new MessageJsonRepresentation(id, from, to, text); |
/** | |
* Environment container contains composition of containers which are declared | |
* in docker-compose.test.yml file. Use a local Docker Compose binary. | |
* Waiting strategies are applied to `service-name` with suffix `_1` | |
*/ | |
@ClassRule | |
public static DockerComposeContainer environment = | |
new DockerComposeContainer(new File("docker-compose.test.yml")) | |
.withLocalCompose(true) | |
.waitingFor("db-mock_1", Wait.forHttp("/").forStatusCode(200)) |
private static void createTopic(String topicName) { | |
// kafka container uses with embedded zookeeper | |
// confluent platform and Kafka compatibility 5.1.x <-> kafka 2.1.x | |
// kafka 2.1.x require option --zookeeper, later versions use --bootstrap-servers instead | |
String createTopic = | |
String.format( | |
"/usr/bin/kafka-topics --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic %s", | |
topicName); | |
try { | |
final Container.ExecResult execResult = kafka.execInContainer("/bin/sh", "-c", createTopic); |