#' API chat: send create (chat) request
#'
#' Creates a model response for the given chat conversation. To get more details, visit
#' https://platform.openai.com/docs/api-reference/chat/create
#' https://platform.openai.com/docs/guides/text-generation
#' @inherit request params return
#' @param messages data.frame, messages comprising the conversation so far (see \link{dialog_df})
#' @param model string, ID of the model to use. See the model endpoint compatibility table
#' https://platform.openai.com/docs/models/model-endpoint-compatibility
#' for details on which models work with the Chat API.
#' @param frequency_penalty NULL/double, number between -2.0 and 2.0. Positive values penalize new tokens based on their
#' existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. More at
#' https://platform.openai.com/docs/guides/text-generation/parameter-details
#' @param logit_bias NULL/list, modify the likelihood of specified tokens appearing in the completion. Accepts a list
#' that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to
#' 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will
#' vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or
#' 100 should result in a ban or exclusive selection of the relevant token. See https://platform.openai.com/tokenizer
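#' For illustration, a sketch of a possible value (the token IDs below are hypothetical placeholders; look up real
#' IDs in the tokenizer linked above):
#' \preformatted{
#' # ban token 1234, nudge token 5678 towards selection
#' logit_bias = list("1234" = -100, "5678" = 5)
#' }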
#' @param logprobs NULL/flag, whether to return log probabilities of the output tokens. If true, returns the log
#' probabilities of each output token in the content of the message. This option is currently not available on the
#' gpt-4-vision-preview model. Defaults to false.
#' @param top_logprobs NULL/int, an integer between 0 and 5 specifying the number of most likely tokens to return at
#' each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
#' @param max_tokens NULL/int, the maximum number of tokens to generate in the chat completion
#' @param n NULL/int, how many chat completion choices to generate for each input message.
#' @param presence_penalty NULL/double, number between -2.0 and 2.0. Positive values penalize new tokens based on
#' whether they appear in the text so far, increasing the model's likelihood to talk about new topics. See
#' https://platform.openai.com/docs/guides/text-generation/parameter-details
#' @param response_format NULL/list, an object specifying the format that the model must output. Compatible with
#' gpt-4-1106-preview and gpt-3.5-turbo-1106. Setting to list(type = "json_object") enables JSON mode, which guarantees
#' the message the model generates is valid JSON. Important: when using JSON mode, you must also instruct the model to
#' produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of
#' whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request.
#' Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation
#' exceeded max_tokens or the conversation exceeded the max context length. Text is the default response format.
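#' For example, a minimal JSON-mode call might look like this (the model name is only illustrative):
#' \preformatted{
#' chat_request(
#'   messages = dialog_df("Return a JSON object with a single key 'answer'."),
#'   model = "gpt-3.5-turbo-1106",
#'   response_format = list(type = "json_object")
#' )
#' }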
#' @param seed NULL/int, this feature is in Beta. If specified, our system will make a best effort to sample
#' deterministically, such that repeated requests with the same seed and parameters should return the same result.
#' Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes
#' in the backend.
#' @param stop NULL/character vector, up to 4 sequences where the API will stop generating further tokens.
#' @param stream NULL/flag, if set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as
#' data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.
#' Defaults to false.
#' @param temperature NULL/double, what sampling temperature to use, between 0 and 2. Higher values like 0.8 will make
#' the output more random, while lower values like 0.2 will make it more focused and deterministic.
#' @param top_p NULL/double, an alternative to sampling with temperature, called nucleus sampling, where the model
#' considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%
#' probability mass are considered. We generally recommend altering this or temperature but not both. Defaults to 1
#' @param tools NULL/list, a "list" of tools the model may call. Currently, only functions are supported as a tool.
#' Use this to provide a list of functions the model may generate JSON inputs for. Example value:
#' \preformatted{
#' list(
#'   # string (required), the type of the tool. Currently, only
#'   # 'function' is supported
#'   type = "function",
#'
#'   # list (required)
#'   `function` = list(
#'     # string (optional)
#'     description = "some description",
#'
#'     # string (required), the name of the function to be called.
#'     # Must be a-z, A-Z, 0-9, or contain underscores and dashes,
#'     # with a maximum length of 64
#'     name = "functionname",
#'
#'     # list (optional), the parameters the function accepts,
#'     # described as a JSON Schema object. Omitting parameters
#'     # defines a function with an empty parameter list.
#'     parameters = list()
#'   )
#' )
#' }
#' @param tool_choice NULL/string/list, controls which (if any) function is called by the model. `none` means the model
#' will not call a function and instead generates a message. `auto` means the model can pick between generating a
#' message or calling a function. Specifying a particular function via list
#' `list(type = "function", "function" = list(name = "my_function"))` forces the model to call that function. `none` is the
#' default when no functions are present, `auto` is the default if functions are present.
#' @param user NULL/string, a unique identifier representing your end-user, which can help OpenAI to monitor and detect
#' abuse. See https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
#' @export
#'
#' @examples
#' \dontrun{
#' question <- dialog_df("hi")
#' res_content <- chat_request(
#'   messages = question,
#'   model = "gpt-3.5-turbo"
#' )
#' if (!is_error(res_content)) {
#'   answer <- chat_fetch_messages(res_content)
#'   conversation <- merge_dialog_df(question, answer)
#'   print(conversation)
#' }
#' }
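#'
#' \dontrun{
#' # a sketch of a tool/function-call request; the weather tool below is
#' # purely hypothetical
#' res_content <- chat_request(
#'   messages = dialog_df("What is the weather like in Paris?"),
#'   model = "gpt-3.5-turbo",
#'   tools = list(list(
#'     type = "function",
#'     `function` = list(
#'       name = "get_weather",
#'       description = "Get the current weather for a city",
#'       parameters = list(
#'         type = "object",
#'         properties = list(
#'           city = list(type = "string")
#'         ),
#'         required = list("city")
#'       )
#'     )
#'   )),
#'   tool_choice = "auto"
#' )
#' }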
#'
chat_request <- function(
  messages,
  model,
  frequency_penalty = NULL,
  logit_bias = NULL,
  logprobs = NULL,
  top_logprobs = NULL,
  max_tokens = NULL,
  n = NULL,
  presence_penalty = NULL,
  response_format = NULL,
  seed = NULL,
  stop = NULL,
  stream = NULL,
  temperature = NULL,
  top_p = NULL,
  tools = NULL,
  tool_choice = NULL,
  user = NULL,
  api_key = api_get_key()
) {
  # asserts
  stopifnot(
    "`messages` must be a data.frame" = checkmate::testDataFrame(messages),
    "`model` must be a non-empty string" = checkmate::testString(model, min.chars = 1),
    "`frequency_penalty` must be a NULL or double" = checkmate::testDouble(frequency_penalty, null.ok = TRUE),
    "`logit_bias` must be a NULL or list of double(s)" =
      is.null(logit_bias) ||
        (checkmate::testList(logit_bias) && all(vapply(logit_bias, is.double, logical(1)))),
    "`logprobs` must be a NULL or flag" = checkmate::testFlag(logprobs, null.ok = TRUE),
    "`top_logprobs` must be a NULL or integer" = checkmate::testInt(top_logprobs, null.ok = TRUE),
    "`max_tokens` must be a NULL or integer" = checkmate::testInt(max_tokens, null.ok = TRUE),
    "`n` must be a NULL or integer" = checkmate::testInt(n, null.ok = TRUE),
    "`presence_penalty` must be a NULL or double" = checkmate::testDouble(presence_penalty, null.ok = TRUE),
    "`response_format` must be a NULL or list" = checkmate::testList(response_format, null.ok = TRUE),
    "`seed` must be a NULL or integer" = checkmate::testInt(seed, null.ok = TRUE),
    "`stop` must be a NULL or character vector" = checkmate::testCharacter(stop, min.len = 1, null.ok = TRUE),
    "`stream` must be a NULL or flag" = checkmate::testFlag(stream, null.ok = TRUE),
    "`temperature` must be a NULL or double" = checkmate::testDouble(temperature, null.ok = TRUE),
    "`top_p` must be a NULL or double" = checkmate::testDouble(top_p, null.ok = TRUE),
    "`tools` must be a NULL or \"object\" (list)" = checkmate::testList(tools, null.ok = TRUE),
    "`tool_choice` must be a NULL, string or \"object\" (list)" =
      checkmate::testString(tool_choice, null.ok = TRUE) ||
        checkmate::testList(tool_choice, null.ok = TRUE),
    "`user` must be a NULL or non-empty string" = checkmate::testString(user, null.ok = TRUE, min.chars = 1)
  )
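  # compose the request body and POST it to the chat completions endpoint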
  request(
    "https://api.openai.com/v1/chat/completions",
    api_key,
    body = list(
      messages = messages[, c("role", "content")],
      model = model,
      frequency_penalty = frequency_penalty,
      logit_bias = logit_bias,
      logprobs = logprobs,
      top_logprobs = top_logprobs,
      max_tokens = max_tokens,
      n = n,
      presence_penalty = presence_penalty,
      response_format = response_format,
      seed = seed,
      stop = stop,
      stream = stream,
      temperature = temperature,
      top_p = top_p,
      tools = tools,
      tool_choice = tool_choice,
      user = user
    )
  )
}
#' Fetch messages from response content
#'
#' Fetch messages (dialog data.frame with chat messages) from response content
#' @inherit chat_request examples
#' @param res_content response object returned by \link{chat_request}
#' @return Messages from response as dialog data.frame (see \link{dialog_df})
#' @export
#'
chat_fetch_messages <- function(res_content) {
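  # build one dialog data.frame per returned choice and merge them into a single data.frame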
  do.call(merge_dialog_df, lapply(res_content$choices, function(choice) {
    dialog_df(
      content = choice$message$content,
      role = choice$message$role,
      finish_reason = choice$finish_reason
    )
  }))
}
#' Feedback - ask chat and receive reply
#'
#' Simple \link{chat_request} wrapper - send text to chat and get response.
#' @inheritParams chat_request
#' @param question string, question text
#' @param print flag, if TRUE, print the answer on the console
#' @return string, chat answer
#' @export
#'
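#' @examples
#' \dontrun{
#' # prints the reply to the console and returns it invisibly
#' answer <- feedback("Briefly describe the R language.")
#' }
#'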
feedback <- function(question, model = "gpt-3.5-turbo", max_tokens = NULL, print = TRUE) {
  # asserts
  stopifnot(
    "`print` must be a flag" = checkmate::testFlag(print)
  )
  # request
  res_content <- chat_request(
    messages = dialog_df(question),
    model = model,
    max_tokens = max_tokens
  )
  if (is_error(res_content)) {
    NA_character_
  } else {
    feedback_df <- chat_fetch_messages(res_content)
    feedback <- paste0(feedback_df$content, collapse = " ")
    if (print) {
      cat(feedback)
      invisible(feedback)
    } else {
      feedback
    }
  }
}