cmd

package
v0.6.2
Published: Apr 7, 2024 License: MIT Imports: 23 Imported by: 0

Documentation

Index

Constants

const DefaultMaxTokens = 1000

const DefaultSystemPrompt = "You are a helpful assistant, you help people by answering their questions politely and precisely."

const DefaultTemperature = 0.7

Variables

var ChatCmd = &cobra.Command{
	Use:   "chat",
	Short: "read a chat from stdin and send to LLM chat",
	Run: func(cmd *cobra.Command, args []string) {

		// verify the installed vim plugin files match the embedded ones
		for _, d := range []string{"ftdetect", "ftplugin", "syntax"} {
			if hasDifference(vimPlugins, "vim/"+d, os.ExpandEnv("$HOME/.vim/"+d)) {
				log.Fatalf("vichat vim plugin appears to be out of sync. run `vichat i` to install it again.")
			}
		}

		var opts = cmd.Flags()
		var temperature float32 = DefaultTemperature
		var topp float32 = 0
		var maxTokens int = DefaultMaxTokens

		if m, err := opts.GetInt("max_tokens"); err == nil {
			maxTokens = m
		}

		if t, err := opts.GetFloat32("temperature"); err == nil {
			temperature = t
		}

		if p, err := opts.GetFloat32("top-p"); err == nil {
			topp = p
		}

		var input string
		var lines []string
		var isSimpleChat bool = false
		if !isatty.IsTerminal(os.Stdin.Fd()) {
			stdin, err := io.ReadAll(os.Stdin)
			if err != nil {
				log.Fatalf("failed to read input: %q", err)
				return
			}

			input = string(stdin)
			lines = strings.Split(input, "\n")
			if strings.HasPrefix(lines[0], "#") {
				// the first line carries request parameters,
				// e.g. "# temperature=0.7, top_p=0.0, max_tokens=1000"
				temperature = getTemperature(lines[0])
				topp = getTopP(lines[0])
				maxTokens = getMaxTokens(lines[0])
				lines = lines[1:] // drop the parameter header before parsing messages
			}
		} else {
			input = strings.Join(args, " ")
			lines = []string{input}
			isSimpleChat = true
		}

		if temperature == 0 {
			// the client omits zero-valued fields, so send the smallest
			// non-zero temperature to request near-deterministic output
			temperature = math.SmallestNonzeroFloat32
		}

		if topp != 0 {
			// temperature and top_p should not be combined: when top_p is
			// set, zero out temperature so it is omitted from the request
			temperature = 0
		}

		cfg := openai.DefaultConfig(os.Getenv("OPENAI_API_KEY"))
		cfg.BaseURL = os.Getenv("OPENAI_API_BASE")
		llmClient := openai.NewClientWithConfig(cfg)

		messages := ParseMessages(lines)
		if len(messages) == 0 {
			log.Fatalf("invalid input")
			return
		}

		if isSimpleChat {
			var promptStr []byte
			var err error
			optPrompt, _ := opts.GetString("system-prompt")
			if optPrompt == ":assistant:" {
				promptStr = []byte(DefaultSystemPrompt)
			} else {
				promptStr, err = os.ReadFile(optPrompt)
				if err != nil {
					// not a readable file: fall back to fuzzy-matching the
					// embedded awesome-prompts CSV by prompt title
					prd := csv.NewReader(bytes.NewReader(awesomePrompts))
					embedPrompts, err := prd.ReadAll()
					if err == nil {
						index := make([]string, len(embedPrompts))
						for i := range embedPrompts {
							index[i] = strings.ToLower(embedPrompts[i][0])
						}

						matches := fuzzy.RankFind(optPrompt, index)
						sort.Sort(matches)

						if len(matches) > 0 {
							hit := matches[0].OriginalIndex
							promptStr = []byte(embedPrompts[hit][1])
						}
					}
				}
			}

			messages = append([]openai.ChatCompletionMessage{{
				Role:    openai.ChatMessageRoleSystem,
				Content: string(promptStr),
			}}, messages...)
		}

		isRenderOutput, _ := opts.GetBool("render")
		stream, _ := opts.GetBool("stream")

		if isRenderOutput {
			resp, err := llmClient.CreateChatCompletion(context.Background(),
				openai.ChatCompletionRequest{
					Model:       openai.GPT3Dot5Turbo,
					Temperature: temperature,
					TopP:        topp,
					MaxTokens:   maxTokens,
					Messages:    messages,
					Stream:      false,
				})
			if err != nil {
				log.Fatalf("failed to send chat: %q", err.Error())
				return
			}

			fmt.Printf("\n%s\n", markdown.Render(resp.Choices[0].Message.Content, 90, 4))
		} else {
			if isSimpleChat {

				dir, err := opts.GetString("outdir")
				if err != nil {
					dir = os.TempDir()
				}

				nonWords := regexp.MustCompile(`\W+`)
				filename := nonWords.ReplaceAllString(input, "_")
				if len(filename) > 50 {
					filename = filename[:50]
				}

				tmpf, err := os.CreateTemp(dir, fmt.Sprintf("%s-*.chat", filename))
				if err != nil {
					log.Fatalf("failed to create temp file: %q", err)
				}

				fmt.Fprintf(tmpf, "# temperature=%.1f, top_p=%0.1f, max_tokens=%d\n\n", temperature, topp, maxTokens)
				printPrompts(tmpf, messages)
				tmpf.Close()

				// invoke vim using cmd and open tmpf
				var cmd *exec.Cmd
				if input == "" {
					cmd = exec.Command("vim", "-c", "norm! GkA", tmpf.Name())
				} else {
					if stream {
						cmd = exec.Command("vim", "-c", "redraw|call SetStream(1)|Chat", tmpf.Name())
					} else {
						cmd = exec.Command("vim", "-c", "redraw|call SetStream(0)|Chat", tmpf.Name())
					}
				}

				cmd.Stdin = os.Stdin
				cmd.Stdout = os.Stdout

				if err := cmd.Run(); err != nil {
					log.Fatalf("failed to run vim: %q", err)
				}
			} else {
				if stream {
					resp, err := llmClient.CreateChatCompletionStream(context.Background(),
						openai.ChatCompletionRequest{
							Model:       openai.GPT3Dot5Turbo,
							Temperature: temperature,
							TopP:        topp,
							MaxTokens:   maxTokens,
							Messages:    messages,
						})

					if err != nil {
						log.Fatalf("failed to stream chat: %q", err.Error())
						return
					}

					defer resp.Close()
					for {
						response, err := resp.Recv()
						if errors.Is(err, io.EOF) {
							fmt.Println()
							return
						}

						if err != nil {
							fmt.Printf("\nStream error: %v\n", err)
							return
						}

						fmt.Print(response.Choices[0].Delta.Content)
					}

				} else {
					resp, err := llmClient.CreateChatCompletion(context.Background(),
						openai.ChatCompletionRequest{
							Model:       openai.GPT3Dot5Turbo,
							Temperature: temperature,
							TopP:        topp,
							MaxTokens:   maxTokens,
							Messages:    messages,
							Stream:      false,
						})
					if err != nil {
						log.Fatalf("failed to send chat: %q", err.Error())
						return
					}

					res := resp.Choices[0].Message.Content
					// isRenderOutput is always false on this path (the render
					// case is handled above), so print the raw response
					fmt.Printf("%s\n\n", res)
				}
			}
		}
	},
}
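
The getTemperature, getTopP, and getMaxTokens helpers referenced above are not shown on this page. Below is a minimal sketch of how one of them could parse the parameter header line; the regex-based approach and the helper's shape are assumptions, not the package's actual implementation:

import (
	"regexp"
	"strconv"
)

// getTemperatureSketch (hypothetical) pulls a float parameter out of a
// header line such as "# temperature=0.7, top_p=0.0, max_tokens=1000".
func getTemperatureSketch(line string) float32 {
	re := regexp.MustCompile(`temperature\s*=\s*([0-9.]+)`)
	m := re.FindStringSubmatch(line)
	if m == nil {
		return DefaultTemperature // fall back to the package default
	}
	v, err := strconv.ParseFloat(m[1], 32)
	if err != nil {
		return DefaultTemperature
	}
	return float32(v)
}
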
var InstallCmd = &cobra.Command{
	Use:     "install-vim-plugin",
	Short:   "install the vim plugin",
	Aliases: []string{"i"},
	Run: func(cmd *cobra.Command, args []string) {
		installVim()
	},
}
var SplitCmd = &cobra.Command{
	Use:   "split",
	Short: "Split a text into multiple chunks",
	Run: func(cmd *cobra.Command, args []string) {
		text, err := readAll(os.Stdin)
		if err != nil {
			log.Fatalf("failed to read input: %q", err)
		}
		chunkSize, _ := cmd.Flags().GetInt("chunk-size")
		overlap, _ := cmd.Flags().GetInt("overlap")

		chunks := RecursiveTextSplit(text, chunkSize, overlap)
		for i := range chunks {
			fmt.Println("--------------------------------------------")
			fmt.Println(chunks[i])
		}
	},
}
var TokCmd = &cobra.Command{
	Use:   "tok",
	Short: "given a piece of text, tok estimate the num of tokens for a given model offline",
	Run: func(cmd *cobra.Command, args []string) {

		f := cmd.Flags()
		model, err := f.GetString("model")
		if err != nil {
			log.Fatalf("failed to read model: %q", err)
		}

		text, err := readAll(os.Stdin)
		if err != nil {
			log.Fatalf("failed to read input: %q", err)
		}

		tokIDs, err := vichat.Tokenize(text, model)
		if err != nil {
			log.Fatalf("failed to tokenize: %q", err)
			return
		}

		if verbose, _ := f.GetBool("verbose"); verbose {
			toks, err := vichat.Decode(tokIDs, model)
			if err != nil {
				log.Fatalf("failed to decode: %q", err)
				return
			}

			for i := range tokIDs {
				fmt.Printf("%d:\t%q\n", tokIDs[i], toks[i])
			}
		} else {
			fmt.Println(len(tokIDs))
		}
	},
}
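
vichat.Tokenize and vichat.Decode above come from the parent vichat package. A minimal usage sketch for offline token counting, based only on the signatures visible in TokCmd; the model string "gpt-3.5-turbo" is an assumption:

ids, err := vichat.Tokenize("Hello, world", "gpt-3.5-turbo")
if err != nil {
	log.Fatalf("failed to tokenize: %q", err)
}
fmt.Println(len(ids)) // token count, computed offline with no API call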

Functions

func ParseMessages added in v0.6.2

func ParseMessages(lines []string) []openai.ChatCompletionMessage
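
The chat-file line format that ParseMessages expects is not documented on this page. A minimal sketch under the assumption that each message begins with an upper-case role prefix (the SYSTEM:/USER:/ASSISTANT: markers are assumptions) and that unprefixed lines continue the previous message:

func parseMessagesSketch(lines []string) []openai.ChatCompletionMessage {
	var msgs []openai.ChatCompletionMessage
	roles := map[string]string{
		"SYSTEM:":    openai.ChatMessageRoleSystem,
		"USER:":      openai.ChatMessageRoleUser,
		"ASSISTANT:": openai.ChatMessageRoleAssistant,
	}
	for _, ln := range lines {
		matched := false
		for prefix, role := range roles {
			if strings.HasPrefix(ln, prefix) {
				msgs = append(msgs, openai.ChatCompletionMessage{
					Role:    role,
					Content: strings.TrimSpace(strings.TrimPrefix(ln, prefix)),
				})
				matched = true
				break
			}
		}
		if !matched && len(msgs) > 0 {
			// continuation line: append to the previous message
			msgs[len(msgs)-1].Content += "\n" + ln
		}
	}
	return msgs
}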

func RecursiveTextSplit added in v0.6.0

func RecursiveTextSplit(text string, chunkSize, overlap int) []string
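
A minimal sliding-window sketch of the chunk-size/overlap contract, assuming character-based sizes; the name "recursive" suggests the actual implementation first splits on natural boundaries (paragraphs, sentences) before falling back to hard cuts:

func splitSketch(text string, chunkSize, overlap int) []string {
	step := chunkSize - overlap
	if step <= 0 {
		step = chunkSize // guard against overlap >= chunkSize
	}
	var chunks []string
	for start := 0; start < len(text); start += step {
		end := start + chunkSize
		if end > len(text) {
			end = len(text)
		}
		chunks = append(chunks, text[start:end])
		if end == len(text) {
			break
		}
	}
	return chunks
}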

Types

type TimeQuery added in v0.1.5

type TimeQuery struct {
	SecondsAgo int `json:"secondsAgo"`
}

type TimeResp added in v0.1.5

type TimeResp struct {
	Time  string `json:"time"`
	Query string `json:"query"`
}
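
These types read like the request and response payloads of a time-lookup tool; the tool-calling wiring is not shown on this page. A minimal sketch of the JSON shapes implied by the struct tags (uses encoding/json and time):

q := TimeQuery{SecondsAgo: 3600}
b, _ := json.Marshal(q)
fmt.Println(string(b)) // {"secondsAgo":3600}

r := TimeResp{
	Time:  time.Now().Add(-time.Hour).Format(time.RFC3339),
	Query: string(b),
}
out, _ := json.Marshal(r)
fmt.Println(string(out))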
