Compare commits

...

3 Commits

5 changed files with 39 additions and 14 deletions

View File

@@ -1,3 +1,5 @@
all: build
build: lint build: lint
CGO_ENABLED=0 go build -v -trimpath -ldflags='-s -w' CGO_ENABLED=0 go build -v -trimpath -ldflags='-s -w'
@@ -13,4 +15,4 @@ send: pack
clean: clean:
rm -f tgbot_misaka_5882f7 tgbot_misaka_5882f7.* rm -f tgbot_misaka_5882f7 tgbot_misaka_5882f7.*
.PHONY: build pack send clean .PHONY: all build pack send clean test

View File

@@ -1,3 +1,9 @@
`MISAKA 5882F7` # `MISAKA 5882F7`
Just another random telegram bot. Just another random telegram bot.
Code is messy. Expect a lot of hiccups.
## Known problems
- Message caching needs improvement

View File

@@ -1,6 +1,8 @@
package main package main
import ( import (
"crypto/md5"
"encoding/base64"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@@ -134,7 +136,10 @@ func assistantStreamedResponse(request openai.ChatRequest, cb assistantStreamedR
if minWaitSatisfied { if minWaitSatisfied {
break Drain break Drain
} }
<-minWait select {
case <-minWait:
case <-resp.Done:
}
minWaitSatisfied = true minWaitSatisfied = true
} }
} }
@@ -155,7 +160,7 @@ func assistantStreamedResponse(request openai.ChatRequest, cb assistantStreamedR
minWaitDurSecs := lo.Min([]int{nUpdates, 4}) + nErrs*3 minWaitDurSecs := lo.Min([]int{nUpdates, 4}) + nErrs*3
minWait = time.After(time.Duration(minWaitDurSecs) * time.Second) minWait = time.After(time.Duration(minWaitDurSecs) * time.Second)
// send the partial message // Send the partial message
respText := respBuilder.String() + assistantWritingSign respText := respBuilder.String() + assistantWritingSign
if err := cb(respText, false); err != nil { if err := cb(respText, false); err != nil {
logger.Warnw("failed to send partial update", "error", err) logger.Warnw("failed to send partial update", "error", err)
@@ -231,6 +236,7 @@ func handleAssistantConversation(c tele.Context, thread []*tele.Message) error {
Messages: chatReqMsgs, Messages: chatReqMsgs,
Temperature: lo.ToPtr(0.42), Temperature: lo.ToPtr(0.42),
MaxTokens: 2048, MaxTokens: 2048,
User: assistantHashUserId(lastMsg.Sender.ID),
} }
typingNotifyCh := setTyping(c) typingNotifyCh := setTyping(c)
@@ -269,3 +275,9 @@ func assistantRemoveMention(msg, name string) string {
} }
return orig return orig
} }
// assistantHashUserId derives a stable, anonymized identifier from a Telegram
// user ID, used as the OpenAI `user` field for abuse monitoring. The numeric
// ID is prefixed with a fixed salt, hashed with MD5 (anonymization only, not a
// security boundary — hence the original "don't judge me"), and the URL-safe
// base64 digest is truncated to 22 characters.
func assistantHashUserId(uid int64) string {
	const salt = "RdnuRPqp66vtbc28QRO0ecKSLKXifz7G9UbXLoyCMpw"
	digest := md5.Sum([]byte(salt + strconv.FormatInt(uid, 10)))
	encoded := base64.URLEncoding.EncodeToString(digest[:])
	// A 16-byte digest encodes to 24 base64 chars; the last two carry only
	// padding bits, so 22 chars retain the full digest.
	return encoded[:22]
}

View File

@@ -18,16 +18,16 @@ type ChatMessage struct {
type ChatRequest struct { type ChatRequest struct {
Model string `json:"model"` Model string `json:"model"`
Messages []ChatMessage `json:"messages"` Messages []ChatMessage `json:"messages"`
Temperature *float64 `json:"temperature,omitempty"` Temperature *float64 `json:"temperature,omitempty"` // What sampling temperature to use, between 0 and 2.
TopP *float64 `json:"top_p,omitempty"` TopP *float64 `json:"top_p,omitempty"` // Nucleus sampling. Specify this or temperature but not both.
N int `json:"n,omitempty"` N int `json:"n,omitempty"` // How many chat completion choices to generate for each input message.
Stream bool `json:"stream,omitempty"` Stream bool `json:"stream,omitempty"` // If set, partial message deltas will be sent as data-only server-sent events as they become available.
Stop []string `json:"stop,omitempty"` Stop []string `json:"stop,omitempty"` // Up to 4 sequences where the API will stop generating further tokens.
MaxTokens int `json:"max_tokens,omitempty"` MaxTokens int `json:"max_tokens,omitempty"`
PresencePenalty *float64 `json:"presence_penalty,omitempty"` PresencePenalty *float64 `json:"presence_penalty,omitempty"` // Number between -2.0 and 2.0.
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` // Number between -2.0 and 2.0.
LogitBias map[string]float64 `json:"logit_bias,omitempty"` LogitBias map[string]float64 `json:"logit_bias,omitempty"` // Modify the likelihood of specified tokens appearing in the completion.
User string `json:"user,omitempty"` User string `json:"user,omitempty"` // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
} }
type ChatResponseChoice struct { type ChatResponseChoice struct {
@@ -51,6 +51,7 @@ type ChatResponseStream struct {
Created int Created int
Model string Model string
Stream chan string Stream chan string
Done chan struct{}
Err error Err error
} }

View File

@@ -77,11 +77,15 @@ func (c *Client) ChatCompletionStream(request ChatRequest) (*ChatResponseStream,
return nil, errors.Errorf("status code: %d, body: %q", resp.StatusCode(), respBodyStr) return nil, errors.Errorf("status code: %d, body: %q", resp.StatusCode(), respBodyStr)
} }
ret := &ChatResponseStream{Stream: make(chan string, 1024)} ret := &ChatResponseStream{
Stream: make(chan string, 1024),
Done: make(chan struct{}),
}
go func() { go func() {
defer func() { defer func() {
rbody.Close() rbody.Close()
close(ret.Stream) close(ret.Stream)
close(ret.Done)
}() }()
var contentBegan bool var contentBegan bool