Skip to content

Instantly share code, notes, and snippets.

@CorradoLanera
Last active October 12, 2022 14:31
Show Gist options
  • Save CorradoLanera/a3d39f35c5d8fa91297f0974e8b820af to your computer and use it in GitHub Desktop.
Save CorradoLanera/a3d39f35c5d8fa91297f0974e8b820af to your computer and use it in GitHub Desktop.
Shiny + future + plumber
#
# This is a Shiny web application. You can run the application by
# clicking the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(future)
plan("multisession")
library(httr)
library(magrittr)
# Return immediately with a completion message and a timestamp.
# `t` is accepted for interface symmetry with slow() but is only forced,
# never used as a delay.
fast <- function(t, which_me = "First") {
  force(t)
  done_msg <- paste(which_me, "fast done.")
  paste(done_msg, Sys.time())
}
# Block for `t` seconds, then return a completion message with a timestamp.
slow <- function(t, which_me = "First") {
  Sys.sleep(t)
  done_msg <- paste(which_me, "slow done.")
  paste(done_msg, Sys.time())
}
# Call the local plumber API's /fast endpoint and append the local time.
# Assumes the API is listening on 127.0.0.1:9038 (see the plumber script).
plumber_fast <- function(t, which_me = "first") {
  api_url <- "http://127.0.0.1:9038"
  response <- GET(
    api_url,
    path = "fast",
    query = list(t = t, which_me = which_me)
  )
  parsed <- content(response, "parsed")
  paste(parsed, Sys.time())
}
# Call the local plumber API's /slow endpoint and append the local time.
# Assumes the API is listening on 127.0.0.1:9038 (see the plumber script).
plumber_slow <- function(t, which_me = "first") {
  api_url <- "http://127.0.0.1:9038"
  response <- GET(
    api_url,
    path = "slow",
    query = list(t = t, which_me = which_me)
  )
  parsed <- content(response, "parsed")
  paste(parsed, Sys.time())
}
# Call the local plumber API's /future_fast endpoint (the future-backed
# variant of /fast) and append the local time.
plumber_future_fast <- function(t, which_me = "first") {
  api_url <- "http://127.0.0.1:9038"
  response <- GET(
    api_url,
    path = "future_fast",
    query = list(t = t, which_me = which_me)
  )
  parsed <- content(response, "parsed")
  paste(parsed, Sys.time())
}
# Call the local plumber API's /future_slow endpoint (the future-backed
# variant of /slow) and append the local time.
plumber_future_slow <- function(t, which_me = "first") {
  api_url <- "http://127.0.0.1:9038"
  response <- GET(
    api_url,
    path = "future_slow",
    query = list(t = t, which_me = which_me)
  )
  parsed <- content(response, "parsed")
  paste(parsed, Sys.time())
}
#* Perform slow computation
#* Echo back the input
#* @get /fast
function(t = 10) {
# NOTE(review): this annotated endpoint sits inside the Shiny app script,
# duplicating the plumber definitions further below. The description says
# "slow" and the body sleeps, yet the route is /fast (whose real definition
# below does NOT sleep) — this looks like a leftover to remove; confirm.
Sys.sleep(t)
list(res = TRUE)
}
# Define the UI: a slider controlling the duration of the "slow"
# computations, two checkboxes selecting the execution strategy
# ({future} and/or the plumber API), and five text outputs showing
# the timing results. (The original template comments mentioned a
# histogram, which this app never draws.)
ui <- fluidPage(
  # Application title
  titlePanel("Sequences of computations"),
  sidebarLayout(
    sidebarPanel(
      sliderInput(
        "slow_t",
        # Fixed typo in the user-facing label: "slowliness" -> "slowness".
        "Seconds of slowness of slow computations:",
        min = 1,
        max = 30,
        value = 5
      ),
      checkboxInput("fut", "Use future?"),
      checkboxInput("plumb", "Use plumber?")
    ),
    # Timing results rendered by the server logic below
    mainPanel(
      textOutput("first_fast"),
      textOutput("first_slow"),
      textOutput("second_slow"),
      textOutput("second_fast"),
      textOutput("sequential")
    )
  )
)
# Define the server logic. Each output picks one of four execution
# strategies based on the two checkboxes:
#   fut=FALSE, plumb=FALSE : run fast()/slow() locally, blocking.
#   fut=FALSE, plumb=TRUE  : blocking HTTP call to the plumber API.
#   fut=TRUE,  plumb=FALSE : run locally inside a future::future().
#   fut=TRUE,  plumb=TRUE  : HTTP call to the future-backed API endpoint,
#                            itself wrapped in a future::future().
# NOTE(review): when a future is the render value, display relies on
# Shiny's promise integration resolving the Future — confirm {promises}
# semantics if output looks wrong.
server <- function(input, output) {
# "First" fast computation: returns immediately in every strategy.
output$first_fast <- renderText({
t_slow <- req(input$slow_t)
if (input$fut) {
if (input$plumb) {
future::future(plumber_future_fast(t_slow, "First"))
} else {
future::future({fast(t_slow, "First")})
}
} else {
if (input$plumb) {
plumber_fast(t_slow, "First")
} else {
fast(t_slow, "First")
}
}
})
# "First" slow computation: waits input$slow_t seconds (locally or in
# the API) before producing its message.
output$first_slow <- renderText({
t_slow <- req(input$slow_t)
if (input$fut) {
if (input$plumb) {
future::future(plumber_future_slow(t_slow, "First"))
} else {
future::future({slow(t_slow, "First")})
}
} else {
if (input$plumb) {
plumber_slow(t_slow, "First")
} else {
slow(t_slow, "First")
}
}
})
# "Second" slow computation: identical strategy selection, different label.
# With futures enabled it can run concurrently with first_slow.
output$second_slow <- renderText({
t_slow <- req(input$slow_t)
if (input$fut) {
if (input$plumb) {
future::future(plumber_future_slow(t_slow, "Second"))
} else {
future::future({slow(t_slow, "Second")})
}
} else {
if (input$plumb) {
plumber_slow(t_slow, "Second")
} else {
slow(t_slow, "Second")
}
}
})
# "Second" fast computation.
output$second_fast <- renderText({
t_slow <- req(input$slow_t)
if (input$fut) {
if (input$plumb) {
future::future(plumber_future_fast(t_slow, "Second"))
} else {
future::future({fast(t_slow, "Second")})
}
} else {
if (input$plumb) {
plumber_fast(t_slow, "Second")
} else {
fast(t_slow, "Second")
}
}
})
# A fixed fast/slow/slow/fast sequence. In the future branches the four
# futures are launched without collecting their values and the literal
# string is rendered at once; in the blocking branches the four calls run
# to completion (their results are discarded) before the string renders.
output$sequential <- renderText({
t_slow <- req(input$slow_t)
if (input$fut) {
if (input$plumb) {
future::future(plumber_future_fast(t_slow, "Second"))
future::future(plumber_future_slow(t_slow, "Second"))
future::future(plumber_future_slow(t_slow, "Second"))
future::future(plumber_future_fast(t_slow, "Second"))
"Sequential done."
} else {
future::future({fast(t_slow, "Second")})
future::future({slow(t_slow, "Second")})
future::future({slow(t_slow, "Second")})
future::future({fast(t_slow, "Second")})
"Sequential done."
}
} else {
if (input$plumb) {
plumber_fast(t_slow, "Second")
plumber_slow(t_slow, "Second")
plumber_slow(t_slow, "Second")
plumber_fast(t_slow, "Second")
"Sequential done."
} else {
fast(t_slow, "Second")
slow(t_slow, "Second")
slow(t_slow, "Second")
fast(t_slow, "Second")
"Sequential done."
}
}
})
}
# Launch the Shiny application with the UI and server defined above
shinyApp(ui, server)
library(plumber)
library(future)
plan("multisession")
#* @serializer text
#* Perform fast computation
#* @get /fast
function(t, which_me = "First") {
# Log the hit on the API process console.
cat("fast\n")
# `t` is accepted for interface symmetry with /slow but is only forced,
# never used as a delay.
force(t)
# NOTE(review): returning a list() under @serializer text looks
# inconsistent — the text serializer presumably expects a plain character
# value; confirm against plumber's serializer documentation.
list(res = paste(which_me, "fast done."))
}
#* @serializer text
#* Perform slow computation
#* @get /slow
function(t, which_me = "First") {
  # Log the hit on the API process console.
  cat("slow\n")
  # Fix: query parameters arrive from plumber as character strings, and
  # Sys.sleep() requires a numeric time value — convert explicitly.
  Sys.sleep(as.numeric(t))
  list(res = paste(which_me, "slow done."))
}
#* @serializer text
#* Perform fast computation
#* @get /future_fast
function(t, which_me = "First") {
  # Log the hit on the API process console.
  cat("future fast\n")
  # Evaluate in a background session so this endpoint returns promptly.
  future::future({
    force(t)
    done_msg <- paste(which_me, "fast done.")
    list(res = done_msg)
  })
}
#* @serializer text
#* Perform slow computation
#* @get /future_slow
function(t, which_me = "First") {
  # Log the hit on the API process console.
  cat("future slow\n")
  # Evaluate in a background session so other requests are not blocked
  # while this one sleeps.
  future::future({
    # Fix: query parameters arrive from plumber as character strings, and
    # Sys.sleep() requires a numeric time value — convert explicitly.
    Sys.sleep(as.numeric(t))
    list(res = paste(which_me, "slow done."))
  })
}
@CorradoLanera
Copy link
Author

Hi @georgeblck

The scripts are all in their first version: I've never updated them. They are alternatives: app.R activates {future} within Shiny and calls a non-future API (plumber.R), while app-future-into-api.R uses {future} inside the API (plumber-with-future.R). If you look at the mentioned question on SO, I could not make them work as I would expect.

@CorradoLanera
Copy link
Author

Hi @georgeblck ,
I have finally fixed it. Now it should work.

  1. run the plumber API on an R/RStudio session
  2. on a separate R/Rstudio session run the App
  3. try opening many (at least two) instances of the same app (not in a different R/RStudio session — e.g., click the RStudio button to open the app in a browser)
  4. play with the time and the options to see what happens.

Now, parallelism works both with and without the {plumber} API. Text is not displayed immediately because the Shiny process managing the reactivity is still (and must be!) sequential; on the other hand, I have added the system time to the output, and as you can see the evaluations run in parallel with future, as expected.

@georgeblck
Copy link

Hi @CorradoLanera thanks for your work.

I'm still not sure if I really understand the code and the two alternatives; but that's probably because I am new to plumber and don't know the future package.
I was actually looking for a way to make a Shiny app that is only a plumber API.

@CorradoLanera
Copy link
Author

Hi @georgeblck, I have removed the alternatives. Now there are two files/scripts only, which represent a single solution.
plumber-with-future.R implements the plumber APIs (powered by {plumber} and {future} packages). You need to execute that script in an R session to activate the services provided with the corresponding APIs.

app-future-into-api.R implements a Shiny app that uses those APIs to work. So you need to run the app in a different R session, and you can then spin up an instance of the app. If you use the app, asking for something, the app will query the APIs for the computation, the service (running on the first R session) will execute the computation and return the result to the app, and you will see it on the page.

@CorradoLanera
Copy link
Author

The twin usage of {future} (in the two scripts) permits both the app to query the APIs asynchronously (i.e., allowing it to make multiple queries even if a result has not yet been returned), and the services to execute the queried computations asynchronously (i.e., allowing concurrent computations to be evaluated).

That way the (single) app-process would not be frozen while waiting for queried result to be returned from the services (queried through the APIs) and it can process "other" stuff in the meantime. On the same time, the (single) services-process would not be frozen while conducting a computation waiting for the result to execute the next one, and it can run many of them "simultaneously", returning to the app the results as soon as they are ready.

That said (as far as I have understood), the (single) app process monitoring the output-invalidation process remains sequential and synchronous. So even though you can ask for multiple computations from the app at the "same" time, the service can evaluate multiple queries at the "same" time, and the service returns results to the app as soon as they are ready (so not necessarily in the order they were queried), the app will still show you the results in order, because it waits for one output to be updated before updating the next outdated one.

The real power of all of this is not (only) for the single user (who gains the advantage of seeing all the results at the same time, in the time of the slowest one), but for multiple concurrent users of the same app: the app does not use a single process for both rendering and computation (because it delegates to the services behind the APIs), so it can manage multiple queries from multiple users simultaneously, while the computations also happen simultaneously — users do not need to wait for each other's results before seeing their own.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment