Merge pull request #85 from ssb-ngi-pointer/new-js-client
Use new ssb-room client module
This commit is contained in:
commit
0b7b22e11f
2
go.mod
2
go.mod
@ -27,7 +27,7 @@ require (
|
||||
github.com/volatiletech/sqlboiler-sqlite3 v0.0.0-20210314195744-a1c697a68aef // indirect
|
||||
github.com/volatiletech/sqlboiler/v4 v4.5.0
|
||||
github.com/volatiletech/strmangle v0.0.1
|
||||
go.cryptoscope.co/muxrpc/v2 v2.0.0-20210202162901-fe642d405dc6
|
||||
go.cryptoscope.co/muxrpc/v2 v2.0.0-beta.1.0.20210308090127-5f1f5f9cbb59
|
||||
go.cryptoscope.co/netwrap v0.1.1
|
||||
go.cryptoscope.co/secretstream v1.2.2
|
||||
go.mindeco.de v1.8.0
|
||||
|
2
go.sum
2
go.sum
@ -463,6 +463,8 @@ go.cryptoscope.co/margaret v0.0.12-0.20190912103626-34323ad497f4 h1:gLSldWRujtUO
|
||||
go.cryptoscope.co/margaret v0.0.12-0.20190912103626-34323ad497f4/go.mod h1:3rt+RmZTFZEgfvFxz0ZPDBIWtLJOouWtzV6YbBl6sek=
|
||||
go.cryptoscope.co/muxrpc/v2 v2.0.0-20210202162901-fe642d405dc6 h1:p135TwijE3DbmklGygc7++MMRRVlujmjqed8kEOmwLs=
|
||||
go.cryptoscope.co/muxrpc/v2 v2.0.0-20210202162901-fe642d405dc6/go.mod h1:MgaeojIkWY3lLuoNw1mlMT3b3jiZwOj/fgsoGZp/VNA=
|
||||
go.cryptoscope.co/muxrpc/v2 v2.0.0-beta.1.0.20210308090127-5f1f5f9cbb59 h1:Gv5pKkvHYJNc12uRZ/jMCsR17G7v6oFLLCrGAUVxhvo=
|
||||
go.cryptoscope.co/muxrpc/v2 v2.0.0-beta.1.0.20210308090127-5f1f5f9cbb59/go.mod h1:MgaeojIkWY3lLuoNw1mlMT3b3jiZwOj/fgsoGZp/VNA=
|
||||
go.cryptoscope.co/netwrap v0.1.0/go.mod h1:7zcYswCa4CT+ct54e9uH9+IIbYYETEMHKDNpzl8Ukew=
|
||||
go.cryptoscope.co/netwrap v0.1.1 h1:JLzzGKEvrUrkKzu3iM0DhpHmt+L/gYqmpcf1lJMUyFs=
|
||||
go.cryptoscope.co/netwrap v0.1.1/go.mod h1:7zcYswCa4CT+ct54e9uH9+IIbYYETEMHKDNpzl8Ukew=
|
||||
|
@ -1,229 +0,0 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package nodejs_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ssb-ngi-pointer/go-ssb-room/roomdb"
|
||||
"github.com/ssb-ngi-pointer/go-ssb-room/roomdb/mockdb"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.cryptoscope.co/muxrpc/v2"
|
||||
"go.cryptoscope.co/netwrap"
|
||||
"go.cryptoscope.co/secretstream"
|
||||
)
|
||||
|
||||
// all js end-to-end test as a sanity check
|
||||
func TestAllJSEndToEnd(t *testing.T) {
|
||||
// defer leakcheck.Check(t)
|
||||
r := require.New(t)
|
||||
|
||||
ts := newRandomSession(t)
|
||||
// ts := newSession(t, nil)
|
||||
|
||||
// alice is the server now
|
||||
alice, port := ts.startJSBotAsServer("alice", "./testscripts/server.js")
|
||||
|
||||
aliceAddr := &net.TCPAddr{
|
||||
IP: net.ParseIP("127.0.0.1"),
|
||||
Port: port,
|
||||
}
|
||||
|
||||
bob := ts.startJSClient("bob", "./testscripts/simple_client.js",
|
||||
aliceAddr,
|
||||
*alice,
|
||||
)
|
||||
|
||||
// claire wants to connect to bob through alice
|
||||
|
||||
// nasty multiserver-addr hack
|
||||
var roomHandle bytes.Buffer
|
||||
roomHandle.WriteString("tunnel:")
|
||||
roomHandle.WriteString(alice.Ref())
|
||||
roomHandle.WriteString(":")
|
||||
roomHandle.WriteString(bob.Ref())
|
||||
roomHandle.WriteString("~shs:")
|
||||
roomHandle.WriteString(base64.StdEncoding.EncodeToString(bob.ID))
|
||||
|
||||
// write the handle to the testrun folder of the bot
|
||||
handleFile := filepath.Join("testrun", t.Name(), "claire", "endpoint_through_room.txt")
|
||||
os.MkdirAll(filepath.Dir(handleFile), 0700)
|
||||
err := ioutil.WriteFile(handleFile, roomHandle.Bytes(), 0700)
|
||||
r.NoError(err)
|
||||
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
|
||||
claire := ts.startJSClient("claire", "./testscripts/simple_client_opening_tunnel.js",
|
||||
aliceAddr,
|
||||
*alice,
|
||||
)
|
||||
t.Log("this is claire:", claire.Ref())
|
||||
|
||||
time.Sleep(20 * time.Second)
|
||||
|
||||
ts.wait()
|
||||
}
|
||||
|
||||
func TestJSClient(t *testing.T) {
|
||||
// defer leakcheck.Check(t)
|
||||
r := require.New(t)
|
||||
|
||||
ts := newRandomSession(t)
|
||||
// ts := newSession(t, nil)
|
||||
|
||||
var membersDB = &mockdb.FakeMembersService{}
|
||||
var aliases = &mockdb.FakeAliasesService{}
|
||||
srv := ts.startGoServer(membersDB, aliases)
|
||||
membersDB.GetByFeedReturns(roomdb.Member{Nickname: "free4all"}, nil)
|
||||
|
||||
alice := ts.startJSClient("alice", "./testscripts/simple_client.js",
|
||||
srv.Network.GetListenAddr(),
|
||||
srv.Whoami(),
|
||||
)
|
||||
|
||||
var roomHandle bytes.Buffer
|
||||
roomHandle.WriteString("tunnel:")
|
||||
roomHandle.WriteString(srv.Whoami().Ref())
|
||||
roomHandle.WriteString(":")
|
||||
roomHandle.WriteString(alice.Ref())
|
||||
roomHandle.WriteString("~shs:")
|
||||
roomHandle.WriteString(base64.StdEncoding.EncodeToString(alice.ID))
|
||||
|
||||
// write the handle to the testrun folder of the bot
|
||||
handleFile := filepath.Join("testrun", t.Name(), "bob", "endpoint_through_room.txt")
|
||||
os.MkdirAll(filepath.Dir(handleFile), 0700)
|
||||
err := ioutil.WriteFile(handleFile, roomHandle.Bytes(), 0700)
|
||||
r.NoError(err)
|
||||
|
||||
time.Sleep(1500 * time.Millisecond)
|
||||
ts.startJSClient("bob", "./testscripts/simple_client_opening_tunnel.js",
|
||||
srv.Network.GetListenAddr(),
|
||||
srv.Whoami(),
|
||||
)
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
ts.wait()
|
||||
}
|
||||
|
||||
func TestJSServer(t *testing.T) {
|
||||
// defer leakcheck.Check(t)
|
||||
r := require.New(t)
|
||||
a := assert.New(t)
|
||||
|
||||
os.RemoveAll("testrun")
|
||||
|
||||
ts := newRandomSession(t)
|
||||
// ts := newSession(t, nil)
|
||||
|
||||
// alice is the server now
|
||||
alice, port := ts.startJSBotAsServer("alice", "./testscripts/server.js")
|
||||
|
||||
// a 2nd js instance but as a client
|
||||
aliceAddr := &net.TCPAddr{
|
||||
IP: net.ParseIP("127.0.0.1"),
|
||||
Port: port,
|
||||
}
|
||||
|
||||
// now connect our go client
|
||||
var membersDB = &mockdb.FakeMembersService{}
|
||||
var aliasesDB = &mockdb.FakeAliasesService{}
|
||||
client := ts.startGoServer(membersDB, aliasesDB)
|
||||
membersDB.GetByFeedReturns(roomdb.Member{Nickname: "free4all"}, nil)
|
||||
|
||||
var roomHandle bytes.Buffer
|
||||
roomHandle.WriteString("tunnel:")
|
||||
roomHandle.WriteString(alice.Ref())
|
||||
roomHandle.WriteString(":")
|
||||
roomHandle.WriteString(client.Whoami().Ref())
|
||||
roomHandle.WriteString("~shs:")
|
||||
roomHandle.WriteString(base64.StdEncoding.EncodeToString(client.Whoami().ID))
|
||||
|
||||
// write the handle to the testrun folder of the bot
|
||||
handleFile := filepath.Join("testrun", t.Name(), "bob", "endpoint_through_room.txt")
|
||||
os.MkdirAll(filepath.Dir(handleFile), 0700)
|
||||
err := ioutil.WriteFile(handleFile, roomHandle.Bytes(), 0700)
|
||||
r.NoError(err)
|
||||
|
||||
bob := ts.startJSClient("bob", "./testscripts/simple_client_opening_tunnel.js",
|
||||
aliceAddr,
|
||||
*alice,
|
||||
)
|
||||
t.Log("started bob:", bob.Ref())
|
||||
|
||||
// connect to alice
|
||||
aliceShsAddr := netwrap.WrapAddr(aliceAddr, secretstream.Addr{PubKey: alice.ID})
|
||||
|
||||
ctx, connCancel := context.WithCancel(context.TODO())
|
||||
err = client.Network.Connect(ctx, aliceShsAddr)
|
||||
defer connCancel()
|
||||
r.NoError(err, "connect #1 failed")
|
||||
|
||||
// this might fail if the previous node process is still running...
|
||||
// TODO: properly write cleanup
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
srvEdp, has := client.Network.GetEndpointFor(*alice)
|
||||
r.True(has, "botA has no endpoint for the server")
|
||||
t.Log("connected")
|
||||
|
||||
// let B listen for changes
|
||||
newRoomMember, err := srvEdp.Source(ctx, muxrpc.TypeJSON, muxrpc.Method{"tunnel", "endpoints"})
|
||||
r.NoError(err)
|
||||
|
||||
newMemberChan := make(chan string)
|
||||
|
||||
// read all the messages from endpoints and throw them over the channel
|
||||
go func() {
|
||||
for newRoomMember.Next(ctx) {
|
||||
body, err := newRoomMember.Bytes()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
newMemberChan <- string(body)
|
||||
}
|
||||
close(newMemberChan)
|
||||
}()
|
||||
|
||||
// announce A
|
||||
var ret bool
|
||||
err = srvEdp.Async(ctx, &ret, muxrpc.TypeJSON, muxrpc.Method{"tunnel", "announce"})
|
||||
r.NoError(err)
|
||||
a.False(ret, "would assume these are true but..?")
|
||||
|
||||
select {
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Error("timeout")
|
||||
case got := <-newMemberChan:
|
||||
t.Log("received join?")
|
||||
t.Log(got)
|
||||
}
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
err = srvEdp.Async(ctx, &ret, muxrpc.TypeJSON, muxrpc.Method{"tunnel", "leave"})
|
||||
r.NoError(err)
|
||||
a.False(ret, "would assume these are true but..?")
|
||||
|
||||
select {
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Error("timeout")
|
||||
case got := <-newMemberChan:
|
||||
t.Log("received leave?")
|
||||
t.Log(got)
|
||||
}
|
||||
|
||||
srvEdp.Terminate()
|
||||
|
||||
ts.wait()
|
||||
}
|
1215
muxrpc/test/nodejs/package-lock.json
generated
1215
muxrpc/test/nodejs/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@ -14,11 +14,13 @@
|
||||
"sodium-native": "^3.2.0",
|
||||
"ssb-config": "^3.4.5",
|
||||
"ssb-conn": "^0.19.1",
|
||||
"ssb-db": "^20.3.0",
|
||||
"ssb-db2": "^1.18.5",
|
||||
"ssb-gossip": "^1.1.1",
|
||||
"ssb-keys": "^8.0.0",
|
||||
"ssb-replicate": "^1.3.2",
|
||||
"ssb-room": "^1.3.0",
|
||||
"ssb-room-client": "^0.2.0",
|
||||
"tap-spec": "^5.0.0",
|
||||
"tape": "^5.0.1"
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,6 @@
|
||||
const Path = require('path')
|
||||
const { loadOrCreateSync } = require('ssb-keys')
|
||||
const tapSpec = require("tap-spec")
|
||||
const tape = require('tape')
|
||||
const theStack = require('secret-stack')
|
||||
const ssbCaps = require('ssb-caps')
|
||||
@ -11,19 +12,26 @@ if (testSHSappKey !== false) {
|
||||
testAppkey = testSHSappKey
|
||||
}
|
||||
|
||||
const createSbot = theStack({caps: {shs: testAppkey } })
|
||||
.use(require('ssb-db'))
|
||||
.use(require('ssb-conn'))
|
||||
.use(require('ssb-room/tunnel/client'))
|
||||
let createSbot = theStack({caps: {shs: testAppkey } })
|
||||
.use(require('ssb-db2'))
|
||||
.use(require('ssb-db2/compat/db'))
|
||||
.use(require('./testscripts/secretstack_testplugin.js'))
|
||||
|
||||
const testName = process.env.TEST_NAME
|
||||
|
||||
// the other peer we are talking to
|
||||
const testPeerAddr = process.env.TEST_PEERADDR
|
||||
const testPeerRef = process.env.TEST_PEERREF
|
||||
|
||||
const testSession = require(process.env['TEST_SESSIONSCRIPT'])
|
||||
|
||||
const path = require("path")
|
||||
const scriptname = path.basename(__filename)
|
||||
|
||||
// load the plugins needed for this session
|
||||
for (plug of testSession.secretStackPlugins) {
|
||||
createSbot = createSbot.use(require(plug))
|
||||
}
|
||||
|
||||
function bufFromEnv(evname) {
|
||||
const has = process.env[evname]
|
||||
if (has) {
|
||||
@ -32,28 +40,31 @@ function bufFromEnv(evname) {
|
||||
return false
|
||||
}
|
||||
|
||||
tape.createStream().pipe(process.stderr)
|
||||
tape.createStream().pipe(tapSpec()).pipe(process.stderr)
|
||||
tape(testName, function (t) {
|
||||
let timeoutLength = 15000
|
||||
function comment (msg) {
|
||||
t.comment(`[${scriptname}] ${msg}`)
|
||||
}
|
||||
let timeoutLength = 30000
|
||||
var tapeTimeout = null
|
||||
function ready() { // needs to be called by the before block when it's done
|
||||
t.timeoutAfter(timeoutLength) // doesn't exit the process
|
||||
tapeTimeout = setTimeout(() => {
|
||||
t.comment('test timeout')
|
||||
comment('!! test did not complete before timeout; shutting everything down')
|
||||
process.exit(1)
|
||||
}, timeoutLength*1.25)
|
||||
}, timeoutLength)
|
||||
const to = `net:${testPeerAddr}~shs:${testPeerRef.substr(1).replace('.ed25519', '')}`
|
||||
t.comment('dialing:' + to)
|
||||
comment(`dialing: ${to}`)
|
||||
sbot.conn.connect(to, (err, rpc) => {
|
||||
t.error(err, 'connected')
|
||||
t.comment('connected to: '+rpc.id)
|
||||
testSession.after(sbot, rpc, exit)
|
||||
comment(`connected to: ${rpc.id}`)
|
||||
testSession.after(t, sbot, rpc, exit)
|
||||
})
|
||||
}
|
||||
|
||||
function exit() { // call this when you're done
|
||||
sbot.close()
|
||||
t.comment('closed sbot')
|
||||
comment(`closed client: ${testName}`)
|
||||
clearTimeout(tapeTimeout)
|
||||
t.end()
|
||||
process.exit(0)
|
||||
@ -87,8 +98,8 @@ tape(testName, function (t) {
|
||||
|
||||
const sbot = createSbot(opts)
|
||||
const alice = sbot.whoami()
|
||||
t.comment('client spawned. I am:' + alice.id)
|
||||
comment(`client spawned. I am: ${alice.id}`)
|
||||
|
||||
console.log(alice.id) // tell go process who's incoming
|
||||
testSession.before(sbot, ready)
|
||||
testSession.before(t, sbot, ready)
|
||||
})
|
||||
|
@ -1,31 +1,39 @@
|
||||
const Path = require('path')
|
||||
const tapSpec = require('tap-spec')
|
||||
const tape = require('tape')
|
||||
const { loadOrCreateSync } = require('ssb-keys')
|
||||
const theStack = require('secret-stack')
|
||||
const ssbCaps = require('ssb-caps')
|
||||
|
||||
const testSHSappKey = bufFromEnv('TEST_APPKEY')
|
||||
|
||||
let testAppkey = Buffer.from(ssbCaps.shs, 'base64')
|
||||
if (testSHSappKey !== false) {
|
||||
testAppkey = testSHSappKey
|
||||
}
|
||||
|
||||
// stackOpts = {appKey: require('ssb-caps').shs}
|
||||
stackOpts = {caps: {shs: testAppkey } }
|
||||
const createSbot = theStack(stackOpts)
|
||||
.use(require('ssb-db'))
|
||||
.use(require('ssb-conn'))
|
||||
.use(require('ssb-room/tunnel/server'))
|
||||
// .use(require('ssb-logging'))
|
||||
let createSbot = theStack(stackOpts)
|
||||
.use(require('ssb-db2'))
|
||||
.use(require('ssb-db2/compat/db'))
|
||||
|
||||
const testName = process.env['TEST_NAME']
|
||||
const testPort = process.env['TEST_PORT']
|
||||
const testSession = require(process.env['TEST_SESSIONSCRIPT'])
|
||||
|
||||
tape.createStream().pipe(process.stderr);
|
||||
const path = require("path")
|
||||
const scriptname = path.basename(__filename)
|
||||
|
||||
// load the plugins needed for this session
|
||||
for (plug of testSession.secretStackPlugins) {
|
||||
createSbot = createSbot.use(require(plug))
|
||||
}
|
||||
|
||||
tape.createStream().pipe(tapSpec()).pipe(process.stderr);
|
||||
tape(testName, function (t) {
|
||||
// t.timeoutAfter(30000) // doesn't exit the process
|
||||
function comment (msg) {
|
||||
t.comment(`[${scriptname}] ${msg}`)
|
||||
}
|
||||
// t.timeoutAfter(30000) // doesn't exit the process
|
||||
// const tapeTimeout = setTimeout(() => {
|
||||
// t.comment("test timeout")
|
||||
// process.exit(1)
|
||||
@ -33,9 +41,10 @@ tape(testName, function (t) {
|
||||
|
||||
function exit() { // call this when you're done
|
||||
sbot.close()
|
||||
t.comment('closed jsbot')
|
||||
comment(`closed server: ${testName}`)
|
||||
// clearTimeout(tapeTimeout)
|
||||
t.end()
|
||||
process.exit(0)
|
||||
}
|
||||
|
||||
const tempRepo = process.env['TEST_REPO']
|
||||
@ -48,20 +57,17 @@ tape(testName, function (t) {
|
||||
})
|
||||
const alice = sbot.whoami()
|
||||
|
||||
// const replicate_changes = sbot.replicate.changes()
|
||||
|
||||
t.comment("sbot spawned, running before")
|
||||
comment("sbot spawned, running before")
|
||||
|
||||
function ready() {
|
||||
t.comment('server spawned. I am:' + alice.id)
|
||||
comment(`server spawned, I am: ${alice.id}`)
|
||||
console.log(alice.id) // tell go process who our pubkey
|
||||
}
|
||||
testSession.before(sbot, ready)
|
||||
|
||||
|
||||
testSession.before(t, sbot, ready)
|
||||
|
||||
sbot.on("rpc:connect", (remote, isClient) => {
|
||||
t.comment("new connection: "+ remote.id)
|
||||
testSession.after(sbot, remote, exit)
|
||||
comment(`new connection: ${remote.id}`)
|
||||
testSession.after(t, sbot, remote, exit)
|
||||
})
|
||||
})
|
||||
|
||||
@ -72,4 +78,4 @@ function bufFromEnv(evname) {
|
||||
return Buffer.from(has, 'base64')
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@ -10,12 +10,16 @@ import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
mrand "math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ssb-ngi-pointer/go-ssb-room/internal/network"
|
||||
|
||||
"github.com/ssb-ngi-pointer/go-ssb-room/roomdb"
|
||||
|
||||
@ -89,7 +93,8 @@ func newSession(t *testing.T, appKey []byte) *testSession {
|
||||
func (ts *testSession) startGoServer(
|
||||
membersDB roomdb.MembersService,
|
||||
aliasDB roomdb.AliasesService,
|
||||
opts ...roomsrv.Option) *roomsrv.Server {
|
||||
opts ...roomsrv.Option,
|
||||
) *roomsrv.Server {
|
||||
r := require.New(ts.t)
|
||||
|
||||
// prepend defaults
|
||||
@ -106,7 +111,12 @@ func (ts *testSession) startGoServer(
|
||||
|
||||
opts = append(opts,
|
||||
roomsrv.WithPostSecureConnWrapper(func(conn net.Conn) (net.Conn, error) {
|
||||
return debug.WrapDump(filepath.Join("testrun", ts.t.Name(), "muxdump"), conn)
|
||||
ref, err := network.GetFeedRefFromAddr(conn.RemoteAddr())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fname := filepath.Join("testrun", ts.t.Name(), "muxdump", ref.ShortRef())
|
||||
return debug.WrapDump(fname, conn)
|
||||
}),
|
||||
)
|
||||
|
||||
@ -119,6 +129,8 @@ func (ts *testSession) startGoServer(
|
||||
|
||||
ts.done.Go(func() error {
|
||||
err := srv.Network.Serve(ts.ctx)
|
||||
// if the muxrpc protocol fucks up by e.g. unpacking body data into a header, this type of error will be surfaced here and look scary in the test output
|
||||
// example: https://github.com/ssb-ngi-pointer/go-ssb-room/pull/85#issuecomment-801106687
|
||||
if err != nil && !errors.Is(err, context.Canceled) {
|
||||
err = fmt.Errorf("go server exited: %w", err)
|
||||
ts.t.Log(err)
|
||||
@ -132,8 +144,14 @@ func (ts *testSession) startGoServer(
|
||||
|
||||
var jsBotCnt = 0
|
||||
|
||||
// returns the jsbots pubkey
|
||||
func (ts *testSession) startJSClient(name, testScript string, peerAddr net.Addr, peerRef refs.FeedRef) refs.FeedRef {
|
||||
// starts a node process in the client role. returns the jsbots pubkey
|
||||
func (ts *testSession) startJSClient(
|
||||
name,
|
||||
testScript string,
|
||||
// the perr the client should connect to at first (here usually the room server)
|
||||
peerAddr net.Addr,
|
||||
peerRef refs.FeedRef,
|
||||
) refs.FeedRef {
|
||||
ts.t.Log("starting client", name)
|
||||
r := require.New(ts.t)
|
||||
cmd := exec.CommandContext(ts.ctx, "node", "../../../sbot_client.js")
|
||||
@ -166,12 +184,16 @@ func (ts *testSession) startJSClient(name, testScript string, peerAddr net.Addr,
|
||||
}
|
||||
|
||||
cmd.Env = env
|
||||
|
||||
started := time.Now()
|
||||
r.NoError(cmd.Start(), "failed to init test js-sbot")
|
||||
|
||||
ts.done.Go(func() error {
|
||||
err := cmd.Wait()
|
||||
ts.t.Logf("node client %s: exited with %v (after %s)", name, err, time.Since(started))
|
||||
// we need to return the error code to have an idea if any of the tape assertions failed
|
||||
if err != nil {
|
||||
ts.t.Logf("node server %s: exited with %s", name, err)
|
||||
return fmt.Errorf("node client %s exited with %s", name, err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
@ -182,12 +204,17 @@ func (ts *testSession) startJSClient(name, testScript string, peerAddr net.Addr,
|
||||
pubScanner := bufio.NewScanner(outrc) // TODO muxrpc comms?
|
||||
r.True(pubScanner.Scan(), "multiple lines of output from js - expected #1 to be %s pubkey/id", name)
|
||||
|
||||
go io.Copy(os.Stderr, outrc) // restore node stdout to stderr behavior
|
||||
|
||||
jsBotRef, err := refs.ParseFeedRef(pubScanner.Text())
|
||||
r.NoError(err, "failed to get %s key from JS process")
|
||||
ts.t.Logf("JS %s:%d %s", name, jsBotCnt, jsBotRef.Ref())
|
||||
return *jsBotRef
|
||||
}
|
||||
|
||||
// startJSBotAsServer returns the servers public key and it's TCP port on localhost.
|
||||
// This is only here to check compliance against the old javascript server.
|
||||
// We don't care so much about it's internal behavior, just that clients can connect through it.
|
||||
func (ts *testSession) startJSBotAsServer(name, testScriptFileName string) (*refs.FeedRef, int) {
|
||||
r := require.New(ts.t)
|
||||
cmd := exec.CommandContext(ts.ctx, "node", "../../../sbot_serv.js")
|
||||
@ -220,13 +247,12 @@ func (ts *testSession) startJSBotAsServer(name, testScriptFileName string) (*ref
|
||||
}
|
||||
cmd.Env = env
|
||||
|
||||
started := time.Now()
|
||||
r.NoError(cmd.Start(), "failed to init test js-sbot")
|
||||
|
||||
ts.done.Go(func() error {
|
||||
err := cmd.Wait()
|
||||
if err != nil {
|
||||
ts.t.Logf("node server %s: exited with %s", name, err)
|
||||
}
|
||||
ts.t.Logf("node server %s: exited with %v (after %s)", name, err, time.Since(started))
|
||||
return nil
|
||||
})
|
||||
ts.t.Cleanup(func() {
|
||||
|
63
muxrpc/test/nodejs/testscripts/client-opening-tunnel.js
Normal file
63
muxrpc/test/nodejs/testscripts/client-opening-tunnel.js
Normal file
@ -0,0 +1,63 @@
|
||||
const pull = require('pull-stream')
|
||||
const { readFileSync } = require('fs')
|
||||
const path = require("path")
|
||||
const scriptname = path.basename(__filename)
|
||||
|
||||
let newConnections = 0
|
||||
|
||||
module.exports = (t, client, roomrpc, exit) => {
|
||||
// shadow t.comment to include file making the comment
|
||||
function comment (msg) {
|
||||
t.comment(`[${scriptname}] ${msg}`)
|
||||
}
|
||||
newConnections++
|
||||
comment(`new connection: ${roomrpc.id}`)
|
||||
comment(`total connections: ${newConnections}`)
|
||||
|
||||
if (newConnections > 1) {
|
||||
comment('more than two connnections, not doing anything')
|
||||
return
|
||||
}
|
||||
|
||||
// we are now connected to the room server.
|
||||
// log all new endpoints
|
||||
pull(
|
||||
roomrpc.tunnel.endpoints(),
|
||||
pull.drain(el => {
|
||||
comment(`from roomsrv: ${JSON.stringify(el)}`)
|
||||
})
|
||||
)
|
||||
|
||||
// give the room time to start
|
||||
setTimeout(() => {
|
||||
// announce ourselves to the room/tunnel
|
||||
roomrpc.tunnel.announce((err, ret) => {
|
||||
t.error(err, 'announce on server')
|
||||
comment('announced!')
|
||||
|
||||
// put there by the go test process
|
||||
let roomHandle = readFileSync('endpoint_through_room.txt').toString()
|
||||
comment(`connecting to room handle: ${roomHandle}`)
|
||||
|
||||
client.conn.connect(roomHandle, (err, tunneledrpc) => {
|
||||
t.error(err, 'connect through room')
|
||||
comment(`got a tunnel to: ${tunneledrpc.id}`)
|
||||
|
||||
// check the tunnel connection works
|
||||
tunneledrpc.testing.working((err, ok) => {
|
||||
t.error(err, 'testing.working didnt error')
|
||||
t.true(ok, 'testing.working is true')
|
||||
|
||||
// start leaving after 2s
|
||||
setTimeout(() => {
|
||||
roomrpc.tunnel.leave((err, ret) => {
|
||||
t.error(err, 'tunnel.leave')
|
||||
comment('left room... exiting in 1s')
|
||||
setTimeout(exit, 1000)
|
||||
})
|
||||
}, 2000)
|
||||
})
|
||||
})
|
||||
})
|
||||
}, 5000)
|
||||
}
|
43
muxrpc/test/nodejs/testscripts/client.js
Normal file
43
muxrpc/test/nodejs/testscripts/client.js
Normal file
@ -0,0 +1,43 @@
|
||||
const pull = require('pull-stream')
|
||||
const path = require("path")
|
||||
const scriptname = path.basename(__filename)
|
||||
|
||||
module.exports = (t, sbot, rpc, exit) => {
|
||||
// shadow t.comment to include file making the comment
|
||||
function comment (msg) {
|
||||
t.comment(`[${scriptname}] ${msg}`)
|
||||
}
|
||||
// this waits for a new incoming connection _after_ the room server is connected already
|
||||
// so it will be an incomming tunnel client.
|
||||
// since this calls exit() - if no client connects it will not exit
|
||||
sbot.on("rpc:connect", (remote, isClient) => {
|
||||
comment("tunneled connection to simple client!")
|
||||
|
||||
// leave after 3 seconds (give the other party time to call `testing.working()`)
|
||||
setTimeout(() => {
|
||||
rpc.tunnel.leave((err, ret) => {
|
||||
t.error(err, 'tunnel.leave')
|
||||
comment(`tunnel error: ${err}`)
|
||||
comment(`leave value: ${ret}`)
|
||||
comment('left, exiting in 1s')
|
||||
setTimeout(exit, 1000)
|
||||
})
|
||||
}, 3000)
|
||||
})
|
||||
|
||||
// announce ourselves to the room/tunnel
|
||||
rpc.tunnel.announce((err, ret) => {
|
||||
t.error(err, 'tunnel.announce')
|
||||
comment(`announce error: ${err}`)
|
||||
comment(`announce value: ${ret}`)
|
||||
comment('announced!')
|
||||
})
|
||||
|
||||
// log all new endpoints
|
||||
pull(
|
||||
rpc.tunnel.endpoints(),
|
||||
pull.drain(el => {
|
||||
comment(`from roomsrv: ${el}`)
|
||||
})
|
||||
)
|
||||
}
|
8
muxrpc/test/nodejs/testscripts/legacy_client.js
Normal file
8
muxrpc/test/nodejs/testscripts/legacy_client.js
Normal file
@ -0,0 +1,8 @@
|
||||
const secretStackPlugins = require('./secretstack-legacy')
|
||||
const before = require('./minimal-before-setup')
|
||||
const performClientTest = require('./client')
|
||||
module.exports = {
|
||||
secretStackPlugins,
|
||||
before,
|
||||
after: performClientTest
|
||||
}
|
@ -0,0 +1,9 @@
|
||||
const secretStackPlugins = require('./secretstack-legacy')
|
||||
const before = require('./minimal-before-setup')
|
||||
const performOpeningTunnelTest = require('./client-opening-tunnel')
|
||||
|
||||
module.exports = {
|
||||
secretStackPlugins,
|
||||
before,
|
||||
after: performOpeningTunnelTest
|
||||
}
|
34
muxrpc/test/nodejs/testscripts/legacy_server.js
Normal file
34
muxrpc/test/nodejs/testscripts/legacy_server.js
Normal file
@ -0,0 +1,34 @@
|
||||
const pull = require('pull-stream')
|
||||
const path = require("path")
|
||||
const scriptname = path.basename(__filename)
|
||||
|
||||
let connections = 0
|
||||
|
||||
module.exports = {
|
||||
secretStackPlugins: ['ssb-conn', 'ssb-room/tunnel/server'],
|
||||
|
||||
before: (t, sbot, ready) => {
|
||||
pull(
|
||||
sbot.conn.hub().listen(),
|
||||
pull.drain((p) => {
|
||||
t.comment(`[legacy-server.js] peer change ${p.type}: ${p.key}`)
|
||||
})
|
||||
)
|
||||
setTimeout(ready, 1000)
|
||||
},
|
||||
|
||||
after: (t, sbot, client, exit) => {
|
||||
function comment (msg) {
|
||||
t.comment(`[${scriptname}] ${msg}`)
|
||||
}
|
||||
// this runs twice (for each connection)
|
||||
connections++
|
||||
comment(`new connection: ${client.id}`)
|
||||
comment(`total connections: ${connections}`)
|
||||
|
||||
if (connections == 2) {
|
||||
t.comment('2nd connection received. exiting in 10 seconds')
|
||||
setTimeout(exit, 10000)
|
||||
}
|
||||
}
|
||||
}
|
3
muxrpc/test/nodejs/testscripts/minimal-before-setup.js
Normal file
3
muxrpc/test/nodejs/testscripts/minimal-before-setup.js
Normal file
@ -0,0 +1,3 @@
|
||||
module.exports = (t, sbot, ready) => {
|
||||
ready()
|
||||
}
|
8
muxrpc/test/nodejs/testscripts/modern_client.js
Normal file
8
muxrpc/test/nodejs/testscripts/modern_client.js
Normal file
@ -0,0 +1,8 @@
|
||||
const secretStackPlugins = require('./secretstack-modern')
|
||||
const before = require('./minimal-before-setup')
|
||||
const performClientTest = require('./client')
|
||||
module.exports = {
|
||||
secretStackPlugins,
|
||||
before,
|
||||
after: performClientTest
|
||||
}
|
@ -0,0 +1,9 @@
|
||||
const secretStackPlugins = require('./secretstack-modern') // use modern tunnel
|
||||
const before = require('./minimal-before-setup')
|
||||
const performOpeningTunnelTest = require('./client-opening-tunnel')
|
||||
|
||||
module.exports = {
|
||||
secretStackPlugins,
|
||||
before,
|
||||
after: performOpeningTunnelTest
|
||||
}
|
4
muxrpc/test/nodejs/testscripts/secretstack-legacy.js
Normal file
4
muxrpc/test/nodejs/testscripts/secretstack-legacy.js
Normal file
@ -0,0 +1,4 @@
|
||||
module.exports = [
|
||||
'ssb-conn',
|
||||
'ssb-room/tunnel/client'
|
||||
]
|
4
muxrpc/test/nodejs/testscripts/secretstack-modern.js
Normal file
4
muxrpc/test/nodejs/testscripts/secretstack-modern.js
Normal file
@ -0,0 +1,4 @@
|
||||
module.exports = [
|
||||
'ssb-conn',
|
||||
'ssb-room-client'
|
||||
]
|
20
muxrpc/test/nodejs/testscripts/secretstack_testplugin.js
Normal file
20
muxrpc/test/nodejs/testscripts/secretstack_testplugin.js
Normal file
@ -0,0 +1,20 @@
|
||||
/*
|
||||
this testing plugin supplies a very simple method to see if the other side is working
|
||||
*/
|
||||
module.exports = {
|
||||
name: 'testing',
|
||||
version: '1.0.0',
|
||||
manifest: {
|
||||
working: 'async'
|
||||
},
|
||||
permissions: {
|
||||
anonymous: { allow: ['working'] },
|
||||
},
|
||||
init(ssb) {
|
||||
return {
|
||||
working(cb) {
|
||||
cb(null, true)
|
||||
}
|
||||
};
|
||||
},
|
||||
};
|
@ -1,19 +0,0 @@
|
||||
const pull = require('pull-stream')
|
||||
|
||||
module.exports = {
|
||||
before: (sbot, ready) => {
|
||||
pull(
|
||||
sbot.conn.hub().listen(),
|
||||
pull.drain((p) => {
|
||||
console.warn('peer change:',p.type, p.key)
|
||||
})
|
||||
)
|
||||
setTimeout(ready, 1000)
|
||||
},
|
||||
|
||||
after: (sbot, client, exit) => {
|
||||
// hrm.. this runs twice (for each connection)
|
||||
console.warn('server new connection:', client.id)
|
||||
setTimeout(exit, 30000)
|
||||
}
|
||||
}
|
@ -1,45 +0,0 @@
|
||||
const pull = require('pull-stream')
|
||||
|
||||
let newConnections = 0
|
||||
|
||||
module.exports = {
|
||||
before: (sbot, ready) => {
|
||||
ready()
|
||||
},
|
||||
|
||||
after: (sbot, rpc, exit) => {
|
||||
sbot.on("rpc:connect", (remote, isClient) => {
|
||||
console.warn("tunneld connection to simple client!")
|
||||
|
||||
// leave after 5 seconds
|
||||
setTimeout(() => {
|
||||
rpc.tunnel.leave().then((ret) => {
|
||||
console.warn('left')
|
||||
console.warn(ret)
|
||||
console.warn('room left... exiting in 10s')
|
||||
setTimeout(exit, 10000)
|
||||
}).catch((err) => {
|
||||
console.warn('left failed')
|
||||
throw err
|
||||
})
|
||||
}, 5000)
|
||||
})
|
||||
|
||||
// announce ourselves to the room/tunnel
|
||||
rpc.tunnel.announce().then((ret) => {
|
||||
console.warn('announced!')
|
||||
console.warn(ret)
|
||||
}).catch((err) => {
|
||||
console.warn('announce failed')
|
||||
throw err
|
||||
})
|
||||
|
||||
// log all new endpoints
|
||||
pull(
|
||||
rpc.tunnel.endpoints(),
|
||||
pull.drain(el => {
|
||||
console.warn("from roomsrv:",el)
|
||||
})
|
||||
)
|
||||
}
|
||||
}
|
@ -1,82 +0,0 @@
|
||||
// Test script for a legacy ssb-room client that opens a tunnel to another peer.
// The Go test harness loads this module and calls before() during setup and
// after() for every new muxrpc connection the bot receives.

const pull = require('pull-stream')
const { readFileSync } = require('fs')

// Number of connections seen by after(); only the first connection (the room
// server itself) triggers the announce/connect/ping/leave sequence below.
let newConnections = 0

module.exports = {
  // before: setup hook. client is the local sbot; ready() tells the harness
  // that preparation is done.
  before: (client, ready) => {
    // nothing to prepare (like publishes messages, or...)
    ready()

    // let msg = {
    //   type: 'test',
    // }
    // client.publish(msg, (err) => {
    //   if (err) throw err
    // })
  },

  // after: called per new connection. roomSrvRpc is the muxrpc handle for the
  // remote peer; exit() halts the process once the test sequence completed.
  after: (client, roomSrvRpc, exit) => {
    newConnections++
    console.warn('new connection!', roomSrvRpc.id, 'total:', newConnections)

    // only run the tunnel dance for the first connection (the room server);
    // later connections (the tunneled peer) must not re-trigger it
    if (newConnections > 1) {
      console.warn('after call 2 - not exiting')
      return
    }
    // now connected to the room server

    // log all new endpoints
    pull(
      roomSrvRpc.tunnel.endpoints(),
      pull.drain(el => {
        console.warn("from roomsrv:",el)
      })
    )

    // sanity-check that the remote actually is a room before announcing
    roomSrvRpc.tunnel.isRoom().then((yes) => {
      if (!yes) throw new Error("expected isRoom to be true!")
      console.warn("peer is indeed a room!")

      // announce ourselves to the room/tunnel
      roomSrvRpc.tunnel.announce().then((ret) => {
        console.warn('announced!')

        // put there by the go test process
        let roomHandle = readFileSync('endpoint_through_room.txt').toString()
        console.warn("connecting to room handle:", roomHandle)

        // dial the other client through the room using the multiserver handle
        client.conn.connect(roomHandle, (err, tunneldRpc) => {
          if (err) throw err
          console.warn("got tunnel to:", tunneldRpc.id)

          // check the tunnel connection works
          tunneldRpc.tunnel.ping((err, id) => {
            if (err) throw err
            console.warn("ping:", id)

            // start leaving after 2s
            setTimeout(() => {
              roomSrvRpc.tunnel.leave().then((ret) => {
                console.warn('left room... exiting in 3s')
                setTimeout(exit, 3000)
              }).catch((err) => {
                console.warn('left failed')
                throw err
              })
            }, 2000)
          })
        })

      }).catch((err) => {
        console.warn('announce failed')
        throw err
      })

    }).catch((err) => {
      console.warn('isRoom failed')
      throw err
    })
  }
}
|
@ -13,12 +13,20 @@ proably by turning the exported object into an init function which returns the {
|
||||
// const pull = require('pull-stream')
|
||||
|
||||
module.exports = {
|
||||
before: (sbot, ready) => {
|
||||
secretStackPlugins: ['ssb-blobs', 'ssb-what-ever-you-need'],
|
||||
|
||||
// t is the tape instance for assertions
|
||||
// sbot is the local sbot api
|
||||
// ready is a function to signal that preperation is done
|
||||
before: (t, sbot, ready) => {
|
||||
console.warn('before connect...')
|
||||
setTimeout(ready, 1000)
|
||||
},
|
||||
|
||||
after: (sbot, exit) => {
|
||||
// t and sbot are same as above
|
||||
// clientRpc is the muxrpc client to the other remote (i.e a rpc handle for the room the client is connected to)
|
||||
// exit() is a function that needs to be called to halt the process and exit (it also calls t.end())
|
||||
after: (t, sbot, clientRpc, exit) => {
|
||||
console.warn('after connect...')
|
||||
|
||||
setTimeout(exit, 5000)
|
||||
|
147
muxrpc/test/nodejs/tunnel_connect_test.go
Normal file
147
muxrpc/test/nodejs/tunnel_connect_test.go
Normal file
@ -0,0 +1,147 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package nodejs_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/ssb-ngi-pointer/go-ssb-room/roomdb"
|
||||
"github.com/ssb-ngi-pointer/go-ssb-room/roomdb/mockdb"
|
||||
refs "go.mindeco.de/ssb-refs"
|
||||
)
|
||||
|
||||
// legacy js end-to-end test as a sanity check
|
||||
// it runs the ssb-room server against two ssb-room clients
|
||||
func TestLegacyJSEndToEnd(t *testing.T) {
|
||||
// defer leakcheck.Check(t)
|
||||
r := require.New(t)
|
||||
|
||||
ts := newRandomSession(t)
|
||||
// ts := newSession(t, nil)
|
||||
|
||||
// alice is the server now
|
||||
alice, port := ts.startJSBotAsServer("alice", "./testscripts/legacy_server.js")
|
||||
|
||||
aliceAddr := &net.TCPAddr{
|
||||
IP: net.ParseIP("127.0.0.1"),
|
||||
Port: port,
|
||||
}
|
||||
|
||||
bob := ts.startJSClient("bob", "./testscripts/legacy_client.js",
|
||||
aliceAddr,
|
||||
*alice,
|
||||
)
|
||||
|
||||
// claire wants to connect to bob through alice
|
||||
|
||||
// write the handle to the testrun folder of the bot
|
||||
handleFile := filepath.Join("testrun", t.Name(), "claire", "endpoint_through_room.txt")
|
||||
r.NoError(writeRoomHandleFile(*alice, bob, handleFile))
|
||||
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
|
||||
ts.startJSClient("claire", "./testscripts/legacy_client_opening_tunnel.js",
|
||||
aliceAddr,
|
||||
*alice,
|
||||
)
|
||||
|
||||
t.Log("waiting for process exits")
|
||||
// it would be nice to have a signal here to know when the legacy client is done.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
ts.wait()
|
||||
}
|
||||
|
||||
// Two ssb-room clients against a Go server
|
||||
func TestGoServerLegacyJSClient(t *testing.T) {
|
||||
// defer leakcheck.Check(t)
|
||||
r := require.New(t)
|
||||
|
||||
ts := newRandomSession(t)
|
||||
// ts := newSession(t, nil)
|
||||
|
||||
var membersDB = &mockdb.FakeMembersService{}
|
||||
var aliases = &mockdb.FakeAliasesService{}
|
||||
srv := ts.startGoServer(membersDB, aliases)
|
||||
// allow all peers (there arent any we dont want to allow)
|
||||
membersDB.GetByFeedReturns(roomdb.Member{Nickname: "free4all"}, nil)
|
||||
|
||||
alice := ts.startJSClient("alice", "./testscripts/legacy_client.js",
|
||||
srv.Network.GetListenAddr(),
|
||||
srv.Whoami(),
|
||||
)
|
||||
|
||||
// write the handle to the testrun folder of the bot
|
||||
handleFile := filepath.Join("testrun", t.Name(), "bob", "endpoint_through_room.txt")
|
||||
r.NoError(writeRoomHandleFile(srv.Whoami(), alice, handleFile))
|
||||
|
||||
time.Sleep(1500 * time.Millisecond)
|
||||
ts.startJSClient("bob", "./testscripts/legacy_client_opening_tunnel.js",
|
||||
srv.Network.GetListenAddr(),
|
||||
srv.Whoami(),
|
||||
)
|
||||
|
||||
t.Log("waiting for process exits")
|
||||
// it would be nice to have a signal here to know when the legacy client is done.
|
||||
time.Sleep(15 * time.Second)
|
||||
|
||||
// cancels all contexts => kills all the running processes and waits
|
||||
// (everything should have exited by now!)
|
||||
ts.wait()
|
||||
}
|
||||
|
||||
// the new ssb-room-client module (2x) against a Go room server
|
||||
func TestModernJSClient(t *testing.T) {
|
||||
// defer leakcheck.Check(t)
|
||||
r := require.New(t)
|
||||
|
||||
ts := newRandomSession(t)
|
||||
// ts := newSession(t, nil)
|
||||
|
||||
var membersDB = &mockdb.FakeMembersService{}
|
||||
var aliasesDB = &mockdb.FakeAliasesService{}
|
||||
srv := ts.startGoServer(membersDB, aliasesDB)
|
||||
membersDB.GetByFeedReturns(roomdb.Member{Nickname: "free4all"}, nil)
|
||||
|
||||
// allow all peers (there arent any we dont want to allow in this test)
|
||||
|
||||
alice := ts.startJSClient("alice", "./testscripts/modern_client.js",
|
||||
srv.Network.GetListenAddr(),
|
||||
srv.Whoami(),
|
||||
)
|
||||
|
||||
// write the handle to the testrun folder of the bot
|
||||
handleFile := filepath.Join("testrun", t.Name(), "bob", "endpoint_through_room.txt")
|
||||
r.NoError(writeRoomHandleFile(srv.Whoami(), alice, handleFile))
|
||||
|
||||
ts.startJSClient("bob", "./testscripts/modern_client_opening_tunnel.js",
|
||||
srv.Network.GetListenAddr(),
|
||||
srv.Whoami(),
|
||||
)
|
||||
|
||||
time.Sleep(15 * time.Second)
|
||||
|
||||
ts.wait()
|
||||
}
|
||||
|
||||
func writeRoomHandleFile(srv, target refs.FeedRef, filePath string) error {
|
||||
var roomHandle bytes.Buffer
|
||||
roomHandle.WriteString("tunnel:")
|
||||
roomHandle.WriteString(srv.Ref())
|
||||
roomHandle.WriteString(":")
|
||||
roomHandle.WriteString(target.Ref())
|
||||
roomHandle.WriteString("~shs:")
|
||||
roomHandle.WriteString(base64.StdEncoding.EncodeToString(target.ID))
|
||||
|
||||
os.MkdirAll(filepath.Dir(filePath), 0700)
|
||||
return ioutil.WriteFile(filePath, roomHandle.Bytes(), 0700)
|
||||
}
|
Loading…
Reference in New Issue
Block a user