-
Notifications
You must be signed in to change notification settings - Fork 27
/
Copy pathchat.R
250 lines (227 loc) · 8.61 KB
/
chat.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
#' Chat for gpttools
#'
#' This function provides a high-level interface for communicating with various
#' services and models supported by gpttools. It orchestrates the creation,
#' configuration, and execution of a request based on user inputs and options
#' set for gpttools. The function supports a range of tasks from text
#' generation to code synthesis and can be customized according to skill level
#' and coding style preferences.
#'
#' @param prompt A string containing the initial prompt or question to be sent
#'   to the model. This is a required parameter.
#' @param service The AI service to be used for the request. If not explicitly
#'   provided, this defaults to the value set in
#'   `getOption("gpttools.service")`. If the option is not set, make sure to
#'   provide this parameter to avoid errors.
#' @param history An optional parameter that can be used to include previous
#'   interactions or context for the current session. Defaults to a system
#'   message indicating "You are an R chat assistant".
#' @param stream A logical value indicating whether the interaction should be
#'   treated as a stream for continuous interactions. If not explicitly
#'   provided, this defaults to the value set in
#'   `getOption("gpttools.stream")` (falling back to `TRUE`).
#' @param model The specific model to use for the request. If not explicitly
#'   provided, this defaults to the value set in `getOption("gpttools.model")`.
#' @param skill A character string indicating the skill or capability level of
#'   the user. This parameter allows for customizing the behavior of the model
#'   to the user. If not explicitly provided, this defaults to the value set in
#'   `getOption("gpttools.skill")` (falling back to `NULL`).
#' @param style The coding style preferred by the user for code generation
#'   tasks. This parameter is particularly useful when the task involves
#'   generating code snippets or scripts. If not explicitly provided, this
#'   defaults to the value set in `getOption("gpttools.code_style")` (falling
#'   back to `"no preference"`).
#' @param task The specific type of task to be performed, ranging from text
#'   generation to code synthesis, depending on the capabilities of the model.
#'   Defaults to `NULL`.
#' @param custom_prompt An optional parameter that provides a way to extend or
#'   customize the initial prompt with additional instructions or context.
#' @param process_response A logical indicating whether to process the model's
#'   response. If `TRUE`, the response will be passed to
#'   `gptstudio_response_process()` for further processing. Defaults to `FALSE`.
#'   Refer to `gptstudio_response_process()` for more details.
#' @param where A character string indicating the location or environment where
#'   the chat is taking place. Options are `c("console", "source", "shiny")`.
#'   The default is `"console"`, which means the chat is taking place in the R
#'   console.
#' @param ... Reserved for future use.
#'
#' @return Depending on the task and processing, the function returns the
#'   response from the model, which could be text, code, or any other structured
#'   output defined by the task and model capabilities. The precise format and
#'   content of the output depend on the specified options and the capabilities
#'   of the selected model.
#'
#' @examples
#' \dontrun{
#' # Basic usage with a text prompt:
#' result <- chat("What is the weather like today?")
#'
#' # Advanced usage with custom settings, assuming appropriate global options are set:
#' result <- chat(
#'   prompt = "Write a simple function in R",
#'   skill = "advanced",
#'   style = "tidyverse",
#'   task = "coding"
#' )
#'
#' # Usage with explicit service and model specification:
#' result <- chat(
#'   prompt = "Explain the concept of tidy data in R",
#'   service = "openai",
#'   model = "gpt-4-turbo-preview",
#'   skill = "intermediate",
#'   task = "general"
#' )
#' }
#'
#' @export
chat <- function(prompt,
                 service = getOption("gpttools.service"),
                 history = list(list(role = "system", content = "You are an R chat assistant")),
                 stream = getOption("gpttools.stream", TRUE),
                 model = getOption("gpttools.model"),
                 skill = getOption("gpttools.skill", NULL),
                 style = getOption("gpttools.code_style", "no preference"),
                 task = NULL,
                 custom_prompt = NULL,
                 process_response = FALSE,
                 where = "console",
                 ...) {
  # Non-streaming path: google/huggingface services do not stream here, and an
  # explicit stream = FALSE always takes the request/response path.
  if (rlang::is_false(stream) || service %in% c("google", "huggingface")) {
    response <-
      gptstudio::gptstudio_create_skeleton(
        service = service,
        prompt = prompt,
        history = history,
        stream = stream,
        model = model,
        ...
      ) |>
      gptstudio::gptstudio_skeleton_build(
        skill = skill,
        style = style,
        task = task,
        custom_prompt = custom_prompt
      ) |>
      gptstudio::gptstudio_request_perform()
    if (process_response) {
      response |> gptstudio::gptstudio_response_process()
    } else {
      response$response
    }
  } else {
    # NOTE(review): the streaming branch forwards only prompt/service/where;
    # history, model, skill, style, task, and custom_prompt are not passed to
    # stream_chat() — confirm this is intentional.
    stream_chat(
      prompt = prompt,
      service = service,
      r = NULL,
      output_id = NULL,
      where = where
    )
  }
}
#' Ghost Chat
#'
#' Streams an AI-generated code suggestion into the active source document,
#' using the lines above and below the cursor as context. The prompt marks the
#' cursor position with the `[[start_here]]` placeholder so the model knows
#' where its response will be inserted.
#'
#' @inheritParams chat
#' @export
ghost_chat <- function(service = getOption("gpttools.service", "openai"),
                       stream = TRUE,
                       where = "source") {
  # NOTE(review): `stream` is accepted for interface compatibility but is not
  # forwarded to stream_chat() — confirm whether it should be.
  context <- get_cursor_context()
  # The example placeholder must match the `[[start_here]]` marker that
  # get_cursor_context() inserts (the original example used "[[start here]]",
  # which contradicted the instruction line).
  instructions <- glue::glue(
    "You are an expert coding assistant that provides brief code suggestions
    directly into the files as code. Your response will go directly into an
    .{context$file_ext} file. Your response should only contain code or code
    comments. Do not add free text.
    You are given context above and below the current cursor position.
    Here is an example:
    library(tidyverse)
    p1 <-
    ggplot(mtcars, aes(x = mpg, y = wt)) +
    geom_point() +
    geom_smooth(method = 'lm') +
    labs(title = 'MPG vs. Weight', x = 'Miles per Gallon', y = 'Weight') +
    [[start_here]]
    ggsave(\"myplot.png\", p1)
    Your response begins at the placeholder [[start_here]].
    Here is the context:
    {context$above}
    {context$below}"
  )
  stream_chat(
    prompt = instructions,
    service = service,
    r = NULL,
    output_id = NULL,
    where = where
  )
}
#' Writing Assistant
#'
#' Streams AI-generated writing suggestions into the active source document,
#' using the text above and below the cursor as context. The prompt marks the
#' cursor position with the `[[start_here]]` placeholder so the model knows
#' where its response will be inserted.
#'
#' @inheritParams chat
#' @export
ghost_writer <- function(service = getOption("gpttools.service", "openai"),
                         stream = TRUE,
                         where = "source") {
  # NOTE(review): `stream` is accepted for interface compatibility but is not
  # forwarded to stream_chat() — confirm whether it should be.
  context <- get_cursor_context()
  # The example placeholder must match the `[[start_here]]` marker that
  # get_cursor_context() inserts (the original example used "[[start here]]",
  # which contradicted the instruction line).
  instructions <- glue::glue(
    "You are an expert writing assistant that provides brief suggestions and
    improvements directly into the text. Your response will go directly into the
    document. You should only provide text or comments related to the writing.
    Do not add any code. You are given context above and below the current
    cursor position.
    Here is an example:
    The quick brown fox jumps over the lazy dog. The dog, startled by the
    fox's sudden movement, [[start_here]] barks loudly and chases after the fox.
    The fox, being much quicker and more agile, easily outmaneuvers the dog and
    disappears into the dense forest.
    Your response begins at the placeholder [[start_here]].
    Here is the context:
    {context$above}
    {context$below}"
  )
  stream_chat(
    prompt = instructions,
    service = service,
    r = NULL,
    output_id = NULL,
    where = where
  )
}
#' Capture the text surrounding the cursor in the active source editor
#'
#' Reads the active RStudio source document, splices a placeholder marker into
#' the cursor line at the cursor column, and returns up to `context_lines`
#' lines of text above (including the marked cursor line) and below the cursor.
#'
#' @param context_lines Number of lines of context to capture on each side of
#'   the cursor. Defaults to 20.
#' @param placeholder Marker string inserted at the cursor position so
#'   downstream prompts can reference where the response should begin.
#' @return A list with elements `above` (string ending with the cursor line
#'   containing the placeholder), `below` (string, `""` when the cursor is on
#'   the last line), `cursor` (the cursor position from the editor selection),
#'   and `file_ext` (file extension; `"R"` for unsaved documents).
get_cursor_context <- function(context_lines = 20,
                               placeholder = "[[start_here]]") {
  doc <- rstudioapi::getSourceEditorContext()
  cursor_line <- doc$selection[[1]]$range$start[1]
  cursor_pos <- doc$selection[[1]]$range$end
  start_line <- max(1, cursor_line - context_lines)
  # Fix: the document text lives in doc$contents. The original read
  # doc$content (NULL) here and below, so end_line collapsed to 0 and the
  # context — including the spliced placeholder line — was silently lost.
  end_line <- min(length(doc$contents), cursor_line + context_lines)
  original_str <- doc$contents[cursor_line]
  # Splice the placeholder into the cursor line at the cursor column.
  doc$contents[cursor_line] <-
    stringr::str_c(
      stringr::str_sub(original_str, end = cursor_pos[2] - 1),
      placeholder,
      stringr::str_sub(original_str, start = cursor_pos[2])
    )
  # start_line <= cursor_line always holds (both are >= 1 after the max()
  # clamp), so the cursor line with the placeholder is always included. The
  # original returned character(0) when the cursor sat on line 1, dropping
  # the placeholder entirely and returning an inconsistent type.
  context_above <- paste0(doc$contents[start_line:cursor_line], collapse = "\n")
  context_below <- if (end_line > cursor_line) {
    paste0(doc$contents[(cursor_line + 1):end_line], collapse = "\n")
  } else {
    ""
  }
  # Unsaved documents have no path; assume R so prompts still name a language.
  file_ext <- if (doc$path == "") {
    "R"
  } else {
    tools::file_ext(doc$path)
  }
  list(
    above = context_above,
    below = context_below,
    cursor = cursor_pos,
    file_ext = file_ext
  )
}