I hereby claim:
- I am mfenniak on github.
- I am mfenniak (https://keybase.io/mfenniak) on keybase.
- I have a public key ASBszYEV6fmIrudDoq2iEuw_FwT_X1WqTd3ECfOA-wfF9Ao
To claim this, I am signing this object:
import Network.HTTP | |
import Network.URI | |
import Data.Either | |
data InstanceInfo = | |
InstanceInfo { | |
apiURL :: URI, | |
companyKey :: String, | |
loginName :: String, | |
password :: String |
import os.path | |
from flask import Flask | |
from flask.helpers import safe_join | |
# Injects an "mt" parameter on the URLs of static files that contains the | |
# last-modified time of the file. This allows the use of aggressive cache | |
# settings on static files, while ensuring that content changes are reflected | |
# immediately due to the new URLs. Note that if multiple servers have | |
# different mod times on files, this can cause static files to be reloaded more | |
# often than needed. |
import Ember from 'ember'; | |
// computedDependent is a wrapper around Ember.computed that promotes a property access pattern that | |
// ensures that a computed property declares its dependencies accurately. | |
// | |
// You create a computed property with computedDependent, passing in the getter function and the names | |
// of the dependent properties as additional arguments. The getter function will be called with a | |
// single object argument which has ES5 properties defined upon it matching the names of the dependent | |
// properties; you can then use this 'props' argument to access the dependent properties. | |
// |
using System; | |
using System.IO; | |
using System.Linq; | |
using System.Runtime.Serialization; | |
using System.ServiceModel.Channels; | |
using System.ServiceModel.Description; | |
using System.Text; | |
using System.Xml; | |
public static class SoapUtilities |
'use strict'; | |
const fortune = require('fortune'); | |
const http = require('http'); | |
const JsonApiSerializer = require('fortune-json-api'); | |
const MemoryAdapter = require('fortune/lib/adapter/adapters/memory'); | |
const HttpSerializer = fortune.net.http.Serializer; | |
const store = fortune( | |
{ |
INFO global: Vagrant version: 1.8.5 | |
INFO global: Ruby version: 2.2.3 | |
INFO global: RubyGems version: 2.4.5.1 | |
INFO global: VAGRANT_EXECUTABLE="C:\\HashiCorp\\Vagrant\\embedded\\gems\\gems\\vagrant-1.8.5\\bin\\vagrant" | |
INFO global: VAGRANT_INSTALLER_EMBEDDED_DIR="C:\\HashiCorp\\Vagrant\\embedded" | |
INFO global: VAGRANT_INSTALLER_ENV="1" | |
INFO global: VAGRANT_INSTALLER_VERSION="2" | |
INFO global: VAGRANT_INTERNAL_BUNDLERIZED="1" | |
INFO global: VAGRANT_LOG="debug" | |
INFO global: VAGRANT_OLD_ENV_="ExitCode=00000001" |
org.apache.kafka.streams.errors.TopologyBuilderException: Invalid topology building: External source topic not found: TableNumber2Aggregated-repartition | |
at org.apache.kafka.streams.processor.internals.StreamPartitionAssignor.ensureCopartitioning(StreamPartitionAssignor.java:452) | |
at org.apache.kafka.streams.processor.internals.StreamPartitionAssignor.ensureCopartitioning(StreamPartitionAssignor.java:440) | |
at org.apache.kafka.streams.processor.internals.StreamPartitionAssignor.assign(StreamPartitionAssignor.java:267) | |
at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator.performAssignment(ConsumerCoordinator.java:260) | |
at org.apache.kafka.clients.consumer.internals.AbstractCoordinator.onJoinLeader(AbstractCoordinator.java:404) | |
at org.apache.kafka.clients.consumer.internals.AbstractCoordinator.access$900(AbstractCoordinator.java:81) | |
at org.apache.kafka.clients.consumer.internals.AbstractCoordinator$JoinGroupResponseHandler.handle(AbstractCoordinator.java:358) | |
at org.apache.kafka.clients.consume |
I hereby claim:
To claim this, I am signing this object:
java.lang.IllegalStateException: Attempting to put a clean entry for key [urn:replicon-tenant:strprc971e3ca9:timesheet:97c0ce25-e039-4e8b-9f2c-d43f0668b755] into NamedCache [0_0-TimesheetNonBillableHours] when it already contains a dirty entry for the same key | |
at org.apache.kafka.streams.state.internals.NamedCache.put(NamedCache.java:124) | |
at org.apache.kafka.streams.state.internals.ThreadCache.put(ThreadCache.java:120) | |
at org.apache.kafka.streams.state.internals.CachingKeyValueStore.get(CachingKeyValueStore.java:146) | |
at org.apache.kafka.streams.state.internals.CachingKeyValueStore.get(CachingKeyValueStore.java:133) | |
at org.apache.kafka.streams.kstream.internals.KTableAggregate$KTableAggregateValueGetter.get(KTableAggregate.java:128) | |
at org.apache.kafka.streams.kstream.internals.KTableKTableLeftJoin$KTableKTableLeftJoinProcessor.process(KTableKTableLeftJoin.java:81) | |
at org.apache.kafka.streams.kstream.internals.KTableKTableLeftJoin$KTableKTableLeftJoinProcessor.process(KTableKTableLeftJoin.java:54) | |
at o |
from sqlalchemy.event import listen | |
def register_citext_type(conn, con_record): | |
from psycopg2.extensions import new_type, register_type | |
from contextlib import closing | |
def cast_citext(in_str, cursor):
    """psycopg2 typecaster for the PostgreSQL ``citext`` type.

    Parameters:
        in_str: the raw value from the database driver, or None when the
            column value is SQL NULL.
        cursor: the psycopg2 cursor that produced the value; its
            connection's client encoding is used to decode the bytes.

    Returns the decoded unicode string, or None for SQL NULL.
    """
    # PEP 8: compare against None with `is`, never `==` (avoids surprises
    # from objects that override __eq__).
    if in_str is None:
        return None
    # NOTE(review): `unicode` implies this file targets Python 2; under
    # Python 3 the equivalent would be str(in_str, cursor.connection.encoding).
    return unicode(in_str, cursor.connection.encoding)
with closing(conn.cursor()) as c: | |
c.execute("SELECT pg_type.oid FROM pg_type WHERE typname = 'citext'") |