# Reference configuration for the DataStax Java driver for Apache Cassandra®.
#
# Unless you use a custom mechanism to load your configuration (see
# SessionBuilder.withConfigLoader), all the values declared here will be used as defaults. You can
# place your own `application.conf` in the classpath to override them.
#
# Options are classified into two categories:
# - basic: what is most likely to be customized first when kickstarting a new application.
# - advanced: more elaborate tuning options, or "expert"-level customizations.
#
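
The override mechanism described above can also be exercised from code. A minimal sketch in Scala, assuming driver 4.x on the classpath; the resource base name "application" is the driver's documented default, everything else is stock API:

import com.datastax.oss.driver.api.core.CqlSession
import com.datastax.oss.driver.api.core.config.DriverConfigLoader

object SessionFromConfig extends App {
  // fromClasspath("application") loads application.conf from the classpath,
  // falling back to the reference.conf defaults for anything left unset.
  val session = CqlSession.builder()
    .withConfigLoader(DriverConfigLoader.fromClasspath("application"))
    .build()
  session.close()
}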
# Cassandra storage config YAML
# NOTE: see https://cassandra.apache.org/doc/latest/configuration/ for
# full explanations of the configuration directives below.
# The name of the cluster. This is mainly used to prevent machines in
# one logical cluster from joining another.
cluster_name: DATACENTER1
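
cluster_name is enforced between nodes, but a client can read the same name back after connecting, which makes a cheap sanity check that an application is pointed at the right cluster. A hedged Scala sketch; the contact point and datacenter are placeholder assumptions:

import java.net.InetSocketAddress
import com.datastax.oss.driver.api.core.CqlSession

object ClusterNameCheck extends App {
  val session = CqlSession.builder()
    .addContactPoint(new InetSocketAddress("127.0.0.1", 9042))
    .withLocalDatacenter("datacenter1")
    .build()
  // getClusterName returns the cluster_name configured above, as an Optional.
  println(s"connected to cluster: ${session.getMetadata.getClusterName}")
  session.close()
}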
package com.example.myproject

import org.apache.avro.Schema
import org.apache.avro.generic.GenericRecord
import org.apache.flink.api.common.eventtime.WatermarkStrategy
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.connector.kafka.sink.{KafkaRecordSerializationSchema, KafkaSink}
import org.apache.flink.connector.kafka.source.KafkaSource
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer
import org.apache.flink.formats.avro.registry.confluent.{ConfluentRegistryAvroDeserializationSchema, ConfluentRegistryAvroSerializationSchema}
import org.apache.flink.formats.avro.typeutils.GenericRecordAvroTypeInfo
import org.apache.flink.streaming.api.scala._
import scala.io.Source

object Main extends App {
  // Broker, registry URL, topics, and schema file are placeholder assumptions.
  val brokers = "localhost:9092"
  val registryUrl = "http://localhost:8081"
  val schema: Schema = new Schema.Parser().parse(Source.fromFile("record.avsc").mkString)

  val env = StreamExecutionEnvironment.getExecutionEnvironment

  // Read GenericRecords from Kafka, deserializing via the Confluent Schema Registry.
  val source = KafkaSource.builder[GenericRecord]()
    .setBootstrapServers(brokers)
    .setTopics("input-topic")
    .setGroupId("my-consumer-group")
    .setStartingOffsets(OffsetsInitializer.earliest())
    .setValueOnlyDeserializer(ConfluentRegistryAvroDeserializationSchema.forGeneric(schema, registryUrl))
    .build()

  // Write the records back out, registering the schema under the output subject.
  val sink = KafkaSink.builder[GenericRecord]()
    .setBootstrapServers(brokers)
    .setRecordSerializer(KafkaRecordSerializationSchema.builder[GenericRecord]()
      .setTopic("output-topic")
      .setValueSerializationSchema(ConfluentRegistryAvroSerializationSchema.forGeneric("output-topic-value", schema, registryUrl))
      .build())
    .build()

  // GenericRecord is not a POJO, so Flink needs explicit Avro type information.
  implicit val genericRecordInfo: TypeInformation[GenericRecord] = new GenericRecordAvroTypeInfo(schema)
  env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-avro-source").sinkTo(sink)
  env.execute("kafka-avro-pipeline")
}
import 'package:flutter/material.dart';
import 'package:flutter_riverpod/flutter_riverpod.dart';
import 'package:shared_preferences/shared_preferences.dart';

Future<void> main() async {
  WidgetsFlutterBinding.ensureInitialized();
  await SharedPreferences.getInstance(); // warm up prefs; assumed intent of the async main
  runApp(const ProviderScope(child: MaterialApp(home: Scaffold(body: Component()))));
}

// Placeholder for the undefined Component widget.
class Component extends StatelessWidget {
  const Component({super.key});
  @override
  Widget build(BuildContext context) => const Center(child: Text('Component'));
}
import 'package:flutter/material.dart';
import 'package:go_router/go_router.dart';
import 'package:flutter_riverpod/flutter_riverpod.dart';

// Riverpod provider holding the current score.
final scoreState = StateProvider<int>((ref) => 0);

// Minimal router; the single route and its screen are placeholder assumptions.
final router = GoRouter(routes: [
  GoRoute(path: '/', builder: (context, state) => const Placeholder()),
]);

void main() {
  runApp(ProviderScope(
    child: MaterialApp.router(debugShowCheckedModeBanner: false, routerConfig: router),
  ));
}
import { gql, useMutation } from '@apollo/client'

// The operation below is a mutation, so the constant is named for it;
// inside a component it pairs with: const [addBook] = useMutation(ADD_BOOK)
const ADD_BOOK = gql`
  mutation AddBook($title: String!, $published: Int!, $author: String!, $genres: [String!]!) {
    addBook(title: $title, published: $published, author: $author, genres: $genres) {
      title
      published
      author
      genres
    }
  }
`
import React, { useState } from 'react';
import { render } from 'ink';
import Input from './components/input';

function Container() {
  // Local state; the shape is from the original snippet.
  const [local, setLocal] = useState({ name: 'testing' });
  // Input's value/onChange props are assumptions about the local ./components/input module.
  return <Input value={local.name} onChange={(name) => setLocal({ ...local, name })} />;
}

render(<Container />);
import React, { Component } from 'react';
import { createStackNavigator, createAppContainer } from 'react-navigation';
import Home from './pages/home/home';
import Profile from './pages/profile/profile';
import Create from './pages/create/create';

// Stack navigator over the three imported screens (react-navigation v3 API).
const AppContainer = createAppContainer(createStackNavigator({ Home, Profile, Create }));

class App extends Component {
  render() {
    return <AppContainer />;
  }
}

export default App;