# Send prompt to an agent and get a response
rbionfoExp="Act as an expert bioinformatician and R user. Answer questions using your expertise. When providing code, provide it in triple backticks and as a single block."
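
# A hedged usage sketch (not part of the original source): this context string
# is meant to be passed to sendPrompt() via its `context` argument; the agent
# object is assumed to come from setupAgent() as in the examples below.
# response <- sendPrompt(agent,
#                        "How do I read a FASTA file into R?",
#                        context = rbionfoExp)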
#' Send a prompt to a specified language model agent and return the response.
#'
#' @param agent An object containing the agent's information (e.g., type and model etc.).
#' @param prompt The prompt text to send to the language model.
#' @param context Optional context to provide alongside the prompt (default is promptContext(type = "simple")).
#' @param return.type The type of output to return, either the text response ("text") or
#' the entire response object ("object").
#' @param previous.msgs A list of lists holding previous prompts and responses.
#' Useful for passing the context of earlier messages to the API.
#' This argument works with the openai and generic APIs, but not with the replicate API.
#' Default: NULL
#' @param ... Additional arguments to be passed to the LLM API, such as the
#' maximum number of tokens to return ("max_tokens").
#' Users can also provide other OpenAI-style arguments as
#' documented in [the official documentation](https://platform.openai.com/docs/api-reference/chat/create). Other APIs follow similar argument naming patterns.
#' @return The text response or the entire response object, based on the specified return type.
#' @seealso \code{\link{promptContext}} for predefined contexts to use.
#' @examples
#' \dontrun{
#' agent <- setupAgent(name="openai", type="chat", model="gpt-4",
#'                     ai_api_key=Sys.getenv("OPENAI_API_KEY"))
#' prompt <- "tell me a joke"
#' response <- sendPrompt(agent, prompt, context="")
#'
#' # increase max_tokens; important for getting longer responses
#' response <- sendPrompt(agent, prompt, context="", return.type="text",
#'                        max_tokens = 500)
#'
#' # feed previous messages into the context
#' prompt <- "what about 2010?"
#' response <- sendPrompt(agent, prompt, context="",
#'                        return.type="text",
#'                        previous.msgs=list(
#'                          list(
#'                            "role" = "user",
#'                            "content" = "Who won the world series in 2020?"
#'                          ),
#'                          list(
#'                            "role" = "assistant",
#'                            "content" = "The Los Angeles Dodgers"
#'                          )
#'                        ))
#' }
#' @import openai
#' @importFrom magrittr %>%
#'
#' @export
sendPrompt<-function(agent,prompt,context=promptContext(type = "simple"),
return.type=c("text","object"),previous.msgs=NULL,...){
# Argument validation
#-----------------------------------------------------------------------------
assertthat::assert_that(
assertthat::`%has_name%`(agent,c("name","model","API","url","headers","ai_api_key","type")),
assertthat::noNA(agent)
)
assertthat::assert_that(
assertthat::is.string(prompt),
assertthat::noNA(prompt)
)
if (!is.null(context)) {
assertthat::assert_that(
assertthat::is.string(context),
assertthat::noNA(context)
)
}
  # Resolve return.type: defaults to "text" and errors on unsupported values.
  # match.arg() also handles the default c("text","object") vector correctly.
  return.type <- match.arg(return.type)
# -------------------------------------------------------------------------------------
if(agent$name=="testAgent"){
promptFunc=testPrompter
}else if(agent$name=="userAgent"){
if (agent$API == "openai"){
if (agent$type=="completion"){
promptFunc = .openai_comp
}else if (agent$type =="chat"){
promptFunc = .openai_chat
}else{
stop("Agent type ", agent$type ," is not compatible with the current setup")
}
}else if (agent$API == "replicate"){
promptFunc = .replicate_chat
}else if (agent$API == "generic"){
promptFunc = genericChat
}else{
stop("The specified API ",agent$API," is not compatible with the current setup")
}
}else{
stop("The specified LLM agent is not compatible with the current setup")
}
  # Build the final prompt, prepending the context if it exists
  final.prompt=paste(context,prompt,sep="\n")
  # Send the prompt and get the result
res<-promptFunc(agent=agent, prompt=final.prompt, return.type=return.type,
previous.msgs=previous.msgs,...)
return (res)
}
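
# A minimal end-to-end sketch (illustrative, not from the original source):
# sendPrompt() only requires that the agent object carries the fields checked
# above ("name", "model", "API", "url", "headers", "ai_api_key", "type").
# setupAgent() normally builds this object; the URL and model below are
# hypothetical placeholders for an OpenAI-compatible server.
# agent <- list(name = "userAgent", API = "generic", type = "chat",
#               model = "local-model",
#               url = "http://localhost:8080/v1/chat/completions",
#               headers = "", ai_api_key = "EMPTY")
# response <- sendPrompt(agent, "Summarize PCA in one sentence.", context = "")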
# environment for testPrompter:
pkg.env <- new.env()
pkg.env$prompterCount <- 0
# This function returns three responses one after another.
# It is used to test the selfcorrect() function and may be used for other
# tests.
#' @noRd
testPrompter<-function(agent,prompt, ...){
if(pkg.env$prompterCount >= 3){pkg.env$prompterCount <- 0}
pkg.env$prompterCount <- pkg.env$prompterCount + 1
# List of responses where it gradually gets the correct code
botResponses=list(
"\n\nThe following R code will read the file called \"test.txt\", normalize the table and do PCA. First, the code will read the file into an R data frame: \n\n```\ndata <- read.table(\"test.txt\", header = TRUE, sep = \"\\t\")\n```\n\nNext, the data will be normalized to the range of 0 to 1:\n\n```\nnormalized.data <- scale(data, center = TRUE, scale = TRUE)\n```\n\nFinally, the normalized data will be used to do a Principal Component Analysis (PCA):\n\n```\npca <- princomp(normalized.data)\n```",
"\n\nThe second response.The following R code will read the file called \"test.txt\", normalize the table and do PCA. First, the code will read the file into an R data frame: \n\n```\ndata <- read.table(\"test.txt\", header = TRUE, sep = \"\\t\")\n```\n\nNext, the data will be normalized to the range of 0 to 1:\n\n```\nnormalized.data <- scale(data, center = TRUE, scale = TRUE)\n```\n\nFinally, the normalized data will be used to do a Principal Component Analysis (PCA):\n\n```\npca <- princomp(normalized.data)\n```",
"\n\nThe third response.The following R code will read the file called \"test.txt\", normalize the table and do PCA. First, the code will read the file into an R data frame: \n\n```\nplot(1:10)```\n\nNext, the data will be normalized to the range of 0 to 1:\n\n"
)
return(botResponses[[pkg.env$prompterCount]])
}
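
# A quick sketch of the cycling behavior (illustrative): testPrompter() ignores
# its arguments and returns the three canned responses in order, resetting
# after the third call.
# r1 <- testPrompter(agent = NULL, prompt = "")  # first canned response
# r2 <- testPrompter(agent = NULL, prompt = "")  # second canned response
# r3 <- testPrompter(agent = NULL, prompt = "")  # third canned response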
# Internal completion code for OpenAI.
# Hides API-specific details so that promptFunc works in a unified way
# across agents.
#' @noRd
.openai_comp<-function(agent, prompt, return.type, previous.msgs=NULL,...){
  # previous.msgs is accepted but unused: the completion endpoint has no
  # notion of chat history, and absorbing it here keeps it out of ...
  res <- openai::create_completion(model=agent$model,
                                   openai_api_key = agent$ai_api_key,
                                   prompt=prompt,...)
if (return.type == "text"){
return (res$choices$text)
}else{
return(res)
}
}
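
# A hedged call sketch (illustrative values): every backend is invoked by
# sendPrompt() with the same named arguments, so the backends are
# interchangeable behind promptFunc.
# .openai_comp(agent = agent, prompt = "Say hi", return.type = "text",
#              max_tokens = 100)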
# Internal chat code for OpenAI.
# Hides API-specific details so that promptFunc works in a unified way
# across agents.
#' @noRd
.openai_chat<-function(agent,prompt,return.type,previous.msgs,...){
args <- list(...)
  # For working with the selfcorrect() function: a supplied messages list overrides the prompt
if ("messages" %in% names(args)){
res <- openai::create_chat_completion(model=agent$model,
messages=args$messages,
openai_api_key = agent$ai_api_key)
  }else if(!is.null(previous.msgs) && is.list(previous.msgs) ){
new.msgs=c(previous.msgs,list(list("role" = "user","content" = prompt)))
res <- openai::create_chat_completion(model=agent$model,
messages=new.msgs,
openai_api_key = agent$ai_api_key,
...)
}else{
res <- openai::create_chat_completion(model=agent$model,
messages=list(
list(
"role" = "user",
"content" = prompt
)),
openai_api_key = agent$ai_api_key,
...)
}
  if (return.type=="text"){
    # The assistant's reply lives in the message.content column of the
    # flattened choices data frame
    return (res$choices$message.content[1])
  }else{
    return(res)
  }
}
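
# A hedged sketch of the messages override used by the self-correction flow
# (the message contents are illustrative): when a full `messages` list is
# supplied via ..., `prompt` is ignored and the list is sent verbatim.
# .openai_chat(agent = agent, prompt = "", return.type = "text",
#              previous.msgs = NULL,
#              messages = list(
#                list("role" = "user", "content" = "Write R code to plot 1:10"),
#                list("role" = "assistant", "content" = "plot(1:10)"),
#                list("role" = "user", "content" = "Now add a title.")
#              ))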
# Internal chat code for the Replicate API.
# Hides API-specific details so that promptFunc works in a unified way
# across agents.
#' @noRd
.replicate_chat <- function(agent,prompt,return.type,...){
# Setup body for replicate request
body <- list()
body[["version"]] <- agent$model
body[["input"]] <- list("prompt"= prompt)
body[["max_new_tokens"]]<-Inf
#send request:
posted <- httr::POST(
url = agent$url,
httr::add_headers(.headers = agent$headers ),
body = body,
encode = "json"
)
# Parse response to retrieve url needed for fetching response
parsed_post <- posted %>%
httr::content(as = "text", encoding = "UTF-8") %>%
jsonlite::fromJSON(flatten = TRUE)
  # If for some reason the request did not go through
  if (parsed_post$status != "starting"){
    print(parsed_post)
    stop("Request failed.")
  }
# Fetch status and parse
respons <- httr::GET(
url = parsed_post$urls$get,
httr::add_headers(.headers = agent$headers )
)
parsed_get <- respons %>%
httr::content(as = "text", encoding = "UTF-8") %>%
jsonlite::fromJSON(flatten = TRUE)
  # Poll until the prediction finishes; stop early if it failed or was
  # canceled, otherwise a failed prediction would loop forever
  while (parsed_get$status != "succeeded"){
    if (parsed_get$status %in% c("failed", "canceled")){
      print(parsed_get)
      stop("Replicate request ", parsed_get$status, ".")
    }
    # Fetch response and parse
    respons <- httr::GET(
      url = parsed_post$urls$get,
      httr::add_headers(.headers = agent$headers )
    )
    parsed_get <- respons %>%
      httr::content(as = "text", encoding = "UTF-8") %>%
      jsonlite::fromJSON(flatten = TRUE)
    # Pause 0.2 sec before the next request
    Sys.sleep(0.2)
  }
  # Collapse the streamed output into a single string when return is text.
  # Otherwise return the parsed object.
  if (return.type == "text"){
    return(paste0(parsed_get$output, collapse = ""))
  }else{
    return (parsed_get)
  }
}
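
# A hedged usage sketch for the Replicate backend (placeholder values):
# agent$url should point at the predictions endpoint and agent$headers must
# carry the API token; setupAgent() normally fills these in.
# agent <- list(name = "userAgent", API = "replicate", type = "chat",
#               model = "<model-version-id>",
#               url = "https://api.replicate.com/v1/predictions",
#               headers = c(Authorization = "Token <REPLICATE_API_TOKEN>"),
#               ai_api_key = "")
# response <- .replicate_chat(agent, prompt = "tell me a joke",
#                             return.type = "text")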
# Send a chat prompt to a generic API that follows the OpenAI API conventions
#' @noRd
genericChat<-function(agent, prompt, return.type="text", previous.msgs=NULL,...){
  args=list(...) # get ellipsis arguments; these must be for the API
  api_argnames<-c("model","temperature","top_p","n","stream","max_tokens",
                  "presence_penalty","frequency_penalty","logit_bias","user")
if(length(args)>0){
body=args[names(args) %in% api_argnames]
}else{
body=list()
}
body[["model"]]=agent$model
if("messages" %in% names(args) ){
body[["messages"]]=args$messages
  }else if(!is.null(previous.msgs) && is.list(previous.msgs) ){
new.msgs=c(previous.msgs,list(list("role" = "user","content" = prompt)))
body[["messages"]]=new.msgs
}else {
body[["messages"]]=list(
list(role = "user", content = prompt)
)
}
# get response
response <- httr::POST(
url =agent$url,
httr::add_headers(Authorization = paste("Bearer", agent$ai_api_key)),
httr::content_type("application/json"),
encode = "json",
body = body
)
  # Return the parsed object as-is when requested
  if (return.type == "object"){
    return(httr::content(response))
  }
  # On an HTTP error return the parsed error body; otherwise extract the text
  if(httr::status_code(response)>200) {
    result <- httr::content(response)
  } else {
    result <- trimws(httr::content(response)$choices[[1]]$message$content)
  }
  return(result)
}
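
# A minimal sketch of calling genericChat() directly (hypothetical endpoint
# and model): extra arguments such as temperature and max_tokens pass through
# ... and are kept in the request body only if they appear in api_argnames.
# response <- genericChat(agent, prompt = "What does PCA do?",
#                         temperature = 0.2, max_tokens = 200)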