This is a free and open-source (FOSS) curation of prompts for OpenAI’s GPT-3.
| License |
|---|
| GPL-3 |
Push your own branch. Request access to the semiosis organisation.
https://github.com/semiosis/prompt-tests/
This is the format I have used to organise
these prompts. It is YAML with a schema,
which is rapidly growing.
This is a very basic .prompt file that should work:

```yaml
title: "My new prompt"
# Make sure to increment the prompt-version every time you update
# the prompt, so it will be re-tested.
prompt-version: 1
prompt: "Complete this sentence"
temperature: 0.8
max-tokens: 60
```
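For illustration, here is roughly the API call this file describes. This is a minimal sketch assuming the pre-v1 openai Python client; the engine name is an assumption, since this basic file does not specify one:

```python
import openai  # pre-v1 openai-python client (assumed)
               # assumes OPENAI_API_KEY is set in the environment

# Each field of the basic .prompt file above maps onto a
# parameter of the completions endpoint.
response = openai.Completion.create(
    engine="davinci",                 # assumed; not set in this file
    prompt="Complete this sentence",  # prompt
    temperature=0.8,                  # temperature
    max_tokens=60,                    # max-tokens
)
print(response["choices"][0]["text"])
```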
This YASnippet snippet contains an explanation of the .prompt file format.

```yaml
# -*- mode: snippet -*-
# name: prompt
# group: prompt-engineering
# key: pr
# expand-env: ((yas-indent-line 'fixed))
# --
in-development: yes
title: "${1:title}"
todo:
design-patterns:
# future-titles: ""
# aims: |+
# - More abstractive rewording
doc: "Given ... ${1:title}"
prompt-version: 1
# <:pp> defines a point where the following
# text is concatenated before the postprocessor
# is run.
# <1>, <2> etc. are where variables are substituted
# (see the rendered example under examples: below).
# <1> is special because it may be the current selection.
# <2> may be inferred from <1> via a prompt.
# This way, a function can be curried/beta-reduced
# to a function of one argument.
prompt: |+
${2:contents}
<1> are like <2> in that
problems:
# Put problems directly after the prompt because the prompt is a multiline string.
# - "Struggles with the latter columns."
# # Additional transformation of prompt after the template
# prompt-filter: "sed -z 's/\\s\\+$//'"
# # Trailing whitespace is always removed
# prompt-remove-trailing-whitespace: on
# myrc will select the completion engine using my config.
# This may be openai-complete or something else.
engine: "myrc"
# If nothing is selected by myrc and openai-complete is used
# by default, then OpenAI should use this engine.
preferred-openai-engine: "davinci"
# 0.0 = /r/hadastroke
# 1.0 = /r/iamveryrandom
# Use 0.3-0.8
temperature: 0.8
max-tokens: 60
top-p: 1.0
# Not available yet (see: openai api completions.create --help)
frequency-penalty: 0.5
# If I make presence-penalty 0 then it will get very terse
presence-penalty: 0.0
best-of: 1
# Only the first one will be used by the API,
# but the completer script will use the others.
# Currently the API can only accept one stop-sequence, but that may change.
stop-sequences:
# - "\\n"
# - "\\n\\n"
# Two hashes are more reliable as a stop sequence because
# sometimes the generation morphs from ### to ##.
- "##"
# inject-start-text: yes
# inject-restart-text: yes
# show-probabilities: off
# Cache the function by default when running the prompt function
cache: on
vars:
- "former"
- "latter"
examples:
- "boysenberries"
- "strawberries"
# test-output: "both are types of berry"
# Completion is for generating a company-mode completion function
# completion: on
# # default values for pen -- evaled
# # This is useful for completion commands.
# pen-defaults:
# - "(detect-language)"
# - "(pen-preceding-text)"
# These are elisp String->String functions run from pen.el.
# They probably run earlier than the preprocessor shell scripts.
pen-preprocessors:
- "pen-pf-correct-grammar"
# # A preprocessor filters the var at that position.
# # The current implementation of preprocessors is kinda slow
# # and will add ~100ms per variable.
# # This may be useful to distinguish a block of text, for example
# preprocessors:
# - "sed 's/^/- /'"
# - "cat"
chomp-start: on
chomp-end: off
prefer-external: on
# This is an optional external command which may be used to perform the same task as the API.
# This can be used to train the prompt.
# The external command must take arguments, not stdin,
# so that all variables may be passed into it.
external: "generate-text-from-input.sh"
# This compares the output of the external script to the output of the LM
similarity-test:
# This script takes 2 arguments, each containing an output,
# and prints a decimal number from 0 to 1 representing the
# quality of the generated output.
quality-script: "my-quality-checker-for-this-prompt.sh"
# This script can be used to validate the output.
# It takes 2 arguments, each containing an output.
# If the output is accurate, it returns exit code 1.
validation-script: "my-validator-for-this-prompt.sh"
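# Illustrative invocations (script names are from above; the
# shell variables are placeholders for the two outputs):
#   my-quality-checker-for-this-prompt.sh "$external_output" "$lm_output"  # prints e.g. 0.85
#   my-validator-for-this-prompt.sh "$external_output" "$lm_output"        # exit code 1 = accurate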
# Enable running conversation
conversation-mode: no
# This is the name of an external database-driven pretext generator.
# It would typically summarize and extract facts from the
# conversation history, then pass the pretext to the new prompt.
# conversation-pretext-generator: "human-conversation"
# Replace selected text
filter: no
# Keep stitching completions together until reaching this limit.
# This allows a full response for answers which may need
# n*max-tokens to reach the stop-sequence; for example,
# stitch-max: 3 allows roughly 3 * max-tokens of output.
stitch-max: 0
needs-work: no
n-test-runs: 5
# Prompt function aliases
# aliases:
# - "asktutor"
# postprocessor: "sed 's/- //' | uniqnosort"
# # Run it n times and combine the output
# n-collate: 10
# This is for combining the collated outputs;
# it might be, for example, summarize or uniqnosort.
# collation-postprocessor: "uniqnosort"
# Exemplary continuation function.
# Prompts are not stitched together/composed, but exemplary functions are.
# <g> is the existing generation
# <1> is a variable
continuation-function: "list-of <1> <g>"
```

If you are looking for a tool which can load
and make use of these .prompt files
directly, you may use pen.el, an Emacs
package that was used to generate them.
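If you just want to inspect a .prompt file from outside Emacs, the format is ordinary YAML. Here is a minimal sketch using Python with PyYAML; the file name is hypothetical:

```python
import yaml  # PyYAML

# A .prompt file is plain YAML, so any YAML parser can read it.
with open("my-new-prompt.prompt") as f:
    prompt = yaml.safe_load(f)

# A few of the fields described above:
print(prompt["title"])
print(prompt["prompt-version"])
print(prompt.get("engine"))
```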