Compare commits
No commits in common. "b60be9ae7e5818ff01a95aad87e06ac5f57bfb03" and "5e65a2ce32a6883d1d966d5feb543c9e7fcf6d9b" have entirely different histories.
b60be9ae7e ... 5e65a2ce32
Makefile (4 changes)
@@ -1,5 +1,3 @@
-all: build
-
 build: lint
 	CGO_ENABLED=0 go build -v -trimpath -ldflags='-s -w'
 
@@ -15,4 +13,4 @@ send: pack
 clean:
 	rm -f tgbot_misaka_5882f7 tgbot_misaka_5882f7.*
 
-.PHONY: all build pack send clean test
+.PHONY: build pack send clean
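With `all: build` removed, `build` becomes the default goal, since make runs the first target in the file when invoked without arguments, and the `.PHONY` list shrinks to match the remaining targets.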
@@ -1,9 +1,3 @@
-# `MISAKA 5882F7`
+`MISAKA 5882F7`
 
 Just another random telegram bot.
-
-Code is messy. Expect a lot of hiccups.
-
-## Known problems
-
-- Message caching needs improvement
assistant.go (16 changes)
@@ -1,8 +1,6 @@
 package main
 
 import (
-	"crypto/md5"
-	"encoding/base64"
 	"strconv"
 	"strings"
 	"time"
@@ -136,10 +134,7 @@ func assistantStreamedResponse(request openai.ChatRequest, cb assistantStreamedR
 		if minWaitSatisfied {
 			break Drain
 		}
-		select {
-		case <-minWait:
-		case <-resp.Done:
-		}
+		<-minWait
 		minWaitSatisfied = true
 	}
 }
@@ -160,7 +155,7 @@ func assistantStreamedResponse(request openai.ChatRequest, cb assistantStreamedR
 	minWaitDurSecs := lo.Min([]int{nUpdates, 4}) + nErrs*3
 	minWait = time.After(time.Duration(minWaitDurSecs) * time.Second)
 
-	// Send the partial message
+	// send the partial message
 	respText := respBuilder.String() + assistantWritingSign
 	if err := cb(respText, false); err != nil {
 		logger.Warnw("failed to send partial update", "error", err)
@@ -236,7 +231,6 @@ func handleAssistantConversation(c tele.Context, thread []*tele.Message) error {
 		Messages:    chatReqMsgs,
 		Temperature: lo.ToPtr(0.42),
 		MaxTokens:   2048,
-		User:        assistantHashUserId(lastMsg.Sender.ID),
 	}
 
 	typingNotifyCh := setTyping(c)
@@ -275,9 +269,3 @@ func assistantRemoveMention(msg, name string) string {
 	}
 	return orig
 }
-
-func assistantHashUserId(uid int64) string {
-	seasoned := []byte("RdnuRPqp66vtbc28QRO0ecKSLKXifz7G9UbXLoyCMpw" + strconv.FormatInt(uid, 10))
-	hashed := md5.Sum(seasoned) // Don't judge me
-	return base64.URLEncoding.EncodeToString(hashed[:])[:22]
-}
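Note on the drain-loop hunk above: the select that raced the minimum-wait timer against resp.Done is reduced to a bare <-minWait receive, so the loop now always sleeps out the full minimum delay instead of waking early when the stream finishes. A minimal standalone sketch of the two wait strategies, with illustrative names and durations that are not from the repo:

package main

import (
	"fmt"
	"time"
)

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond) // pretend the stream finished early
		close(done)
	}()

	// Old behaviour: wake on whichever happens first, the minimum delay
	// elapsing or the stream being done.
	minWait := time.After(2 * time.Second)
	select {
	case <-minWait:
		fmt.Println("minimum wait elapsed")
	case <-done:
		fmt.Println("stream finished before the minimum wait")
	}

	// New behaviour: block unconditionally until the minimum delay elapses.
	minWait = time.After(2 * time.Second)
	<-minWait
	fmt.Println("minimum wait elapsed unconditionally")
}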
@@ -18,16 +18,16 @@ type ChatMessage struct {
 type ChatRequest struct {
 	Model    string        `json:"model"`
 	Messages []ChatMessage `json:"messages"`
-	Temperature *float64 `json:"temperature,omitempty"` // What sampling temperature to use, between 0 and 2.
-	TopP *float64 `json:"top_p,omitempty"` // Nucleus sampling. Specify this or temperature but not both.
-	N int `json:"n,omitempty"` // How many chat completion choices to generate for each input message.
-	Stream bool `json:"stream,omitempty"` // If set, partial message deltas will be sent as data-only server-sent events as they become available.
-	Stop []string `json:"stop,omitempty"` // Up to 4 sequences where the API will stop generating further tokens.
+	Temperature *float64 `json:"temperature,omitempty"`
+	TopP *float64 `json:"top_p,omitempty"`
+	N int `json:"n,omitempty"`
+	Stream bool `json:"stream,omitempty"`
+	Stop []string `json:"stop,omitempty"`
 	MaxTokens int `json:"max_tokens,omitempty"`
-	PresencePenalty *float64 `json:"presence_penalty,omitempty"` // Number between -2.0 and 2.0.
-	FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` // Number between -2.0 and 2.0.
-	LogitBias map[string]float64 `json:"logit_bias,omitempty"` // Modify the likelihood of specified tokens appearing in the completion.
-	User string `json:"user,omitempty"` // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+	PresencePenalty *float64 `json:"presence_penalty,omitempty"`
+	FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"`
+	LogitBias map[string]float64 `json:"logit_bias,omitempty"`
+	User string `json:"user,omitempty"`
 }
 
 type ChatResponseChoice struct {
@@ -51,7 +51,6 @@ type ChatResponseStream struct {
 	Created int
 	Model   string
 	Stream  chan string
-	Done    chan struct{}
 	Err     error
 }
 
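The optional ChatRequest fields are pointers with omitempty so that a nil pointer omits the field from the JSON body entirely, while a pointer to zero still sends an explicit value. A self-contained sketch of that marshalling behaviour, using a struct that mirrors only a subset of the fields and a placeholder model name:

package main

import (
	"encoding/json"
	"fmt"
)

// chatRequest mirrors a few ChatRequest fields for illustration only.
type chatRequest struct {
	Model       string   `json:"model"`
	Temperature *float64 `json:"temperature,omitempty"`
	MaxTokens   int      `json:"max_tokens,omitempty"`
}

func main() {
	temp := 0.42
	withTemp, _ := json.Marshal(chatRequest{Model: "example-model", Temperature: &temp, MaxTokens: 2048})
	noTemp, _ := json.Marshal(chatRequest{Model: "example-model", MaxTokens: 2048})

	fmt.Println(string(withTemp)) // {"model":"example-model","temperature":0.42,"max_tokens":2048}
	fmt.Println(string(noTemp))   // {"model":"example-model","max_tokens":2048}; nil pointer drops the field
}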
@@ -77,15 +77,11 @@ func (c *Client) ChatCompletionStream(request ChatRequest) (*ChatResponseStream,
 		return nil, errors.Errorf("status code: %d, body: %q", resp.StatusCode(), respBodyStr)
 	}
 
-	ret := &ChatResponseStream{
-		Stream: make(chan string, 1024),
-		Done:   make(chan struct{}),
-	}
+	ret := &ChatResponseStream{Stream: make(chan string, 1024)}
 	go func() {
 		defer func() {
 			rbody.Close()
 			close(ret.Stream)
-			close(ret.Done)
 		}()
 
 		var contentBegan bool
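With the Done channel gone from ChatResponseStream, completion is signalled only by the deferred close(ret.Stream): a consumer ranging over Stream falls out of the loop once the producer finishes, then checks Err. A standalone sketch of that consumption pattern; the struct and the fake producer below are illustrative stand-ins rather than the repo's client:

package main

import (
	"fmt"
	"strings"
	"time"
)

// chatResponseStream mirrors the post-change shape: no Done channel, the
// Stream channel being closed is the completion signal.
type chatResponseStream struct {
	Stream chan string
	Err    error
}

func main() {
	resp := &chatResponseStream{Stream: make(chan string, 1024)}

	// Fake producer standing in for the goroutine inside ChatCompletionStream.
	go func() {
		defer close(resp.Stream) // closing the channel signals completion
		for _, chunk := range []string{"Hello", ", ", "world"} {
			resp.Stream <- chunk
			time.Sleep(10 * time.Millisecond)
		}
	}()

	var b strings.Builder
	for chunk := range resp.Stream { // exits when the producer closes Stream
		b.WriteString(chunk)
	}
	if resp.Err != nil {
		fmt.Println("stream error:", resp.Err)
		return
	}
	fmt.Println(b.String())
}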