
@daschl
Last active March 29, 2017 09:36
Full Fetch travel-sample example
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.CouchbaseCluster;
import com.couchbase.client.java.document.JsonDocument;
import com.couchbase.client.java.document.json.JsonObject;
import com.couchbase.client.java.env.CouchbaseEnvironment;
import com.couchbase.client.java.env.DefaultCouchbaseEnvironment;
import com.couchbase.client.java.query.AsyncN1qlQueryResult;
import com.couchbase.client.java.query.AsyncN1qlQueryRow;
import com.couchbase.client.java.query.N1qlMetrics;
import com.couchbase.client.java.query.N1qlQuery;

import java.util.List;
import java.util.concurrent.TimeUnit;

public class BulkFetchExample {

    public static void main(String... args) {
        CouchbaseEnvironment env = DefaultCouchbaseEnvironment.create();
        CouchbaseCluster cluster = CouchbaseCluster.create(env);
        Bucket bucket = cluster.openBucket("travel-sample");

        // Run the fetch several times and print the wall-clock time per run.
        int numFetches = 10;
        for (int i = 0; i < numFetches; i++) {
            long start = System.nanoTime();
            //List<JsonObject> result = queryN1qlOnly(bucket, 15000);
            List<JsonObject> result = queryWithBulkFetch(bucket, 15000);
            long end = System.nanoTime();
            System.out.println("It took " + TimeUnit.NANOSECONDS.toMillis(end - start) + "ms to fetch "
                + result.size() + " docs.");
        }

        cluster.disconnect();
        env.shutdown();
    }

    /**
     * Fetches the full documents through N1QL alone.
     */
    private static List<JsonObject> queryN1qlOnly(Bucket bucket, int limit) {
        String bucketName = "`" + bucket.name() + "`";
        String q = "SELECT " + bucketName + ".* FROM " + bucketName + " LIMIT " + limit;
        return bucket.async().query(N1qlQuery.simple(q))
            .doOnNext(res -> res.info().map(N1qlMetrics::elapsedTime).forEach(t -> System.out.println("N1QL Took: " + t)))
            .flatMap(AsyncN1qlQueryResult::rows)
            .map(AsyncN1qlQueryRow::value)
            .toList()
            .toBlocking()
            .single();
    }

    /**
     * Fetches only the document IDs through N1QL and then loads the full
     * documents concurrently through the key-value API.
     */
    private static List<JsonObject> queryWithBulkFetch(Bucket bucket, int limit) {
        String bucketName = "`" + bucket.name() + "`";
        String q = "SELECT meta().id AS id FROM " + bucketName + " LIMIT " + limit;
        return bucket.async().query(N1qlQuery.simple(q))
            .doOnNext(res -> res.info().map(N1qlMetrics::elapsedTime).forEach(t -> System.out.println("N1QL Took: " + t)))
            .flatMap(AsyncN1qlQueryResult::rows)
            .flatMap(row -> bucket.async().get(row.value().getString("id")))
            .map(JsonDocument::content)
            .toList()
            .toBlocking()
            .single();
    }
}
@abhisheksharma1

Not sure why, but the queryWithBulkFetch method returns results in an inconsistent order. Wondering if there is any way to get a consistent ordering.
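
The ordering is lost because flatMap merges the per-document get() Observables in whatever order they complete, not in the order of the N1QL rows. Below is a minimal, illustrative sketch (not part of the original gist) of an order-preserving variant that would drop into the class above; the method name queryWithOrderedBulkFetch is made up, and it assumes the RxJava 1.x bundled with the 2.x SDK provides concatMapEager (if it does not, plain concatMap also preserves order, but fetches the documents one at a time).

    /**
     * Sketch: like queryWithBulkFetch, but keeps the documents in the same
     * order as the N1QL rows. concatMapEager subscribes to the inner get()
     * Observables eagerly (so the key-value fetches still run concurrently)
     * while emitting the results in the original row order.
     */
    private static List<JsonObject> queryWithOrderedBulkFetch(Bucket bucket, int limit) {
        String bucketName = "`" + bucket.name() + "`";
        String q = "SELECT meta().id AS id FROM " + bucketName + " LIMIT " + limit;
        return bucket.async().query(N1qlQuery.simple(q))
            .flatMap(AsyncN1qlQueryResult::rows)
            .concatMapEager(row -> bucket.async().get(row.value().getString("id")))
            .map(JsonDocument::content)
            .toList()
            .toBlocking()
            .single();
    }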
