overhauling a bunch of stuff

Henry 2021-03-17 15:01:34 +01:00
parent 4ccb342bff
commit 3cea539128
14 changed files with 911 additions and 421 deletions

go.mod

@@ -27,7 +27,7 @@ require (
github.com/volatiletech/sqlboiler-sqlite3 v0.0.0-20210314195744-a1c697a68aef // indirect
github.com/volatiletech/sqlboiler/v4 v4.5.0
github.com/volatiletech/strmangle v0.0.1
go.cryptoscope.co/muxrpc/v2 v2.0.0-20210202162901-fe642d405dc6
go.cryptoscope.co/muxrpc/v2 v2.0.0-beta.1.0.20210308090127-5f1f5f9cbb59
go.cryptoscope.co/netwrap v0.1.1
go.cryptoscope.co/secretstream v1.2.2
go.mindeco.de v1.8.0

go.sum

@@ -463,6 +463,8 @@ go.cryptoscope.co/margaret v0.0.12-0.20190912103626-34323ad497f4 h1:gLSldWRujtUO
go.cryptoscope.co/margaret v0.0.12-0.20190912103626-34323ad497f4/go.mod h1:3rt+RmZTFZEgfvFxz0ZPDBIWtLJOouWtzV6YbBl6sek=
go.cryptoscope.co/muxrpc/v2 v2.0.0-20210202162901-fe642d405dc6 h1:p135TwijE3DbmklGygc7++MMRRVlujmjqed8kEOmwLs=
go.cryptoscope.co/muxrpc/v2 v2.0.0-20210202162901-fe642d405dc6/go.mod h1:MgaeojIkWY3lLuoNw1mlMT3b3jiZwOj/fgsoGZp/VNA=
go.cryptoscope.co/muxrpc/v2 v2.0.0-beta.1.0.20210308090127-5f1f5f9cbb59 h1:Gv5pKkvHYJNc12uRZ/jMCsR17G7v6oFLLCrGAUVxhvo=
go.cryptoscope.co/muxrpc/v2 v2.0.0-beta.1.0.20210308090127-5f1f5f9cbb59/go.mod h1:MgaeojIkWY3lLuoNw1mlMT3b3jiZwOj/fgsoGZp/VNA=
go.cryptoscope.co/netwrap v0.1.0/go.mod h1:7zcYswCa4CT+ct54e9uH9+IIbYYETEMHKDNpzl8Ukew=
go.cryptoscope.co/netwrap v0.1.1 h1:JLzzGKEvrUrkKzu3iM0DhpHmt+L/gYqmpcf1lJMUyFs=
go.cryptoscope.co/netwrap v0.1.1/go.mod h1:7zcYswCa4CT+ct54e9uH9+IIbYYETEMHKDNpzl8Ukew=

File diff suppressed because it is too large


@@ -14,7 +14,7 @@
"sodium-native": "^3.2.0",
"ssb-config": "^3.4.5",
"ssb-conn": "^0.19.1",
"ssb-db": "^20.3.0",
"ssb-db2": "^1.18.5",
"ssb-gossip": "^1.1.1",
"ssb-keys": "^8.0.0",
"ssb-replicate": "^1.3.2",


@@ -12,7 +12,9 @@ if (testSHSappKey !== false) {
}
let createSbot = theStack({caps: {shs: testAppkey } })
.use(require('ssb-db'))
.use(require('ssb-db2'))
.use(require('ssb-db2/compat/db'))
.use(require('./testscripts/secretstack_testplugin.js'))
const testName = process.env.TEST_NAME


@@ -12,7 +12,8 @@ if (testSHSappKey !== false) {
stackOpts = {caps: {shs: testAppkey } }
let createSbot = theStack(stackOpts)
.use(require('ssb-db'))
.use(require('ssb-db2'))
.use(require('ssb-db2/compat/db'))
const testName = process.env['TEST_NAME']
const testPort = process.env['TEST_PORT']


@@ -10,12 +10,16 @@ import (
"encoding/base64"
"errors"
"fmt"
"io"
mrand "math/rand"
"net"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/ssb-ngi-pointer/go-ssb-room/internal/network"
"github.com/ssb-ngi-pointer/go-ssb-room/roomdb"
@@ -89,7 +93,8 @@ func newSession(t *testing.T, appKey []byte) *testSession {
func (ts *testSession) startGoServer(
membersDB roomdb.MembersService,
aliasDB roomdb.AliasesService,
opts ...roomsrv.Option) *roomsrv.Server {
opts ...roomsrv.Option,
) *roomsrv.Server {
r := require.New(ts.t)
// prepend defaults
@@ -106,7 +111,12 @@ func (ts *testSession) startGoServer(
opts = append(opts,
roomsrv.WithPostSecureConnWrapper(func(conn net.Conn) (net.Conn, error) {
return debug.WrapDump(filepath.Join("testrun", ts.t.Name(), "muxdump"), conn)
ref, err := network.GetFeedRefFromAddr(conn.RemoteAddr())
if err != nil {
return nil, err
}
fname := filepath.Join("testrun", ts.t.Name(), "muxdump", ref.ShortRef())
return debug.WrapDump(fname, conn)
}),
)
@@ -132,8 +142,14 @@ func (ts *testSession) startGoServer(
var jsBotCnt = 0
// returns the jsbots pubkey
func (ts *testSession) startJSClient(name, testScript string, peerAddr net.Addr, peerRef refs.FeedRef) refs.FeedRef {
// starts a node process in the client role. returns the jsbot's pubkey
func (ts *testSession) startJSClient(
name,
testScript string,
// the peer the client should connect to at first (here usually the room server)
peerAddr net.Addr,
peerRef refs.FeedRef,
) refs.FeedRef {
ts.t.Log("starting client", name)
r := require.New(ts.t)
cmd := exec.CommandContext(ts.ctx, "node", "../../../sbot_client.js")
@@ -166,14 +182,18 @@ func (ts *testSession) startJSClient(name, testScript string, peerAddr net.Addr,
}
cmd.Env = env
started := time.Now()
r.NoError(cmd.Start(), "failed to init test js-sbot")
ts.done.Go(func() error {
err := cmd.Wait()
ts.t.Logf("node client %s: exited with %v (after %s)", name, err, time.Since(started))
// we need to return the error so we can tell whether any of the tape assertions failed
if err != nil {
ts.t.Logf("node client %s: exited with %s", name, err)
return fmt.Errorf("node client %s exited with %s", name, err)
}
return err
return nil
})
ts.t.Cleanup(func() {
cmd.Process.Kill()
@@ -182,12 +202,17 @@ func (ts *testSession) startJSClient(name, testScript string, peerAddr net.Addr,
pubScanner := bufio.NewScanner(outrc) // TODO muxrpc comms?
r.True(pubScanner.Scan(), "multiple lines of output from js - expected #1 to be %s pubkey/id", name)
go io.Copy(os.Stderr, outrc) // restore node stdout to stderr behavior
jsBotRef, err := refs.ParseFeedRef(pubScanner.Text())
r.NoError(err, "failed to get %s key from JS process", name)
ts.t.Logf("JS %s:%d %s", name, jsBotCnt, jsBotRef.Ref())
return *jsBotRef
}
// startJSBotAsServer returns the server's public key and its TCP port on localhost.
// This is only here to check compliance against the old javascript server.
// We don't care so much about its internal behavior, just that clients can connect through it.
func (ts *testSession) startJSBotAsServer(name, testScriptFileName string) (*refs.FeedRef, int) {
r := require.New(ts.t)
cmd := exec.CommandContext(ts.ctx, "node", "../../../sbot_serv.js")
@@ -220,14 +245,13 @@ func (ts *testSession) startJSBotAsServer(name, testScriptFileName string) (*ref
}
cmd.Env = env
started := time.Now()
r.NoError(cmd.Start(), "failed to init test js-sbot")
ts.done.Go(func() error {
err := cmd.Wait()
if err != nil {
ts.t.Logf("node server %s: exited with %s", name, err)
}
return err
ts.t.Logf("node server %s: exited with %v (after %s)", name, err, time.Since(started))
return nil
})
ts.t.Cleanup(func() {
cmd.Process.Kill()


@@ -1,23 +1,29 @@
const pull = require('pull-stream')
module.exports = {
secretStackPlugins: ['ssb-conn', 'ssb-room/tunnel/client'],
secretStackPlugins: [
'ssb-conn',
'ssb-room/tunnel/client',
],
before: (t, sbot, ready) => {
ready()
},
after: (t, sbot, rpc, exit) => {
// this waits for a new incoming connection _after_ the room server is connected already
// so it will be an incoming tunnel client.
// since this calls exit(), the test will not exit if no client connects
sbot.on("rpc:connect", (remote, isClient) => {
console.warn("tunneld connection to simple client!")
// leave after 5 seconds
// leave after 3 seconds (give the other party time to call ping)
setTimeout(() => {
rpc.tunnel.leave().then((ret) => {
console.warn('left')
console.warn(ret)
console.warn('room left... exiting in 10s')
setTimeout(exit, 10000)
console.warn('room left... exiting in 1s')
setTimeout(exit, 1000)
}).catch((err) => {
t.error(err, 'tunnel.leave failed')
})


@@ -14,24 +14,25 @@ module.exports = {
after: (t, client, roomSrvRpc, exit) => {
newConnections++
t.comment('new connection!' + roomSrvRpc.id)
t.comment('client new connection!' + roomSrvRpc.id)
t.comment('total connections:' + newConnections)
if (newConnections > 1) {
t.comment('after call 2 - not exiting')
t.comment('got a 2nd connection')
return
}
// now connected to the room server
// log all new endpoints
pull(
roomSrvRpc.tunnel.endpoints(),
pull.drain(el => {
t.comment("from roomsrv:",el)
t.comment("from roomsrv:", el)
})
)
roomSrvRpc.tunnel.isRoom().then((yes) => {
roomSrvRpc.tunnel.isRoom((err, yes) => {
t.error(err, "tunnel.isRoom failed")
t.equal(yes, true, "expected isRoom to return true!")
t.comment("peer is indeed a room!")
@@ -42,7 +43,7 @@ module.exports = {
// put there by the go test process
let roomHandle = readFileSync('endpoint_through_room.txt').toString()
t.comment("connecting to room handle:", roomHandle)
t.comment("connecting to room handle:" + roomHandle)
client.conn.connect(roomHandle, (err, tunneldRpc) => {
t.error(err, "connected")
@@ -51,25 +52,20 @@ module.exports = {
// check the tunnel connection works
tunneldRpc.tunnel.ping((err, timestamp) => {
t.error(err, "ping over the tunnel")
t.true(timestamp > 0, "ping returns a timestamp")
t.comment("ping:"+timestamp)
// start leaving after 1s
setTimeout(() => {
roomSrvRpc.tunnel.leave().then((ret) => {
t.comment('left room... exiting in 3s')
setTimeout(exit, 3000)
}).catch((err) => {
t.error(err, 'leave')
})
}, 1000)
roomSrvRpc.tunnel.leave().then((ret) => {
t.comment('left room... exiting in 1s')
setTimeout(exit, 1000)
}).catch((err) => {
t.error(err, 'leave')
})
})
})
}).catch((err) => {
t.error(err, 'announce')
})
}).catch((err) => {
t.error(err, 'isRoom failed')
})
}
}


@@ -22,8 +22,8 @@ module.exports = {
t.comment(`total connections: ${connections}`)
if (connections == 2) {
t.comment('2nd connection received. exiting in 15 seconds')
setTimeout(exit, 15000)
t.comment('2nd connection received. exiting in 10 seconds')
setTimeout(exit, 10000)
}
}
}


@@ -1,7 +1,10 @@
const pull = require('pull-stream')
module.exports = {
secretStackPlugins: ['ssb-conn', 'ssb-room-client'],
secretStackPlugins: [
'ssb-conn',
'ssb-room-client',
],
before: (t, sbot, ready) => {
ready()
@@ -11,19 +14,26 @@ module.exports = {
sbot.on("rpc:connect", (remote, isClient) => {
console.warn("tunneld connection to simple client!")
// leave after 5 seconds
setTimeout(() => {
rpc.tunnel.leave().then((ret) => {
console.warn('left')
console.warn(ret)
console.warn('room left... exiting in 10s')
setTimeout(exit, 10000)
}).catch((err) => {
console.warn('left failed')
throw err
})
}, 5000)
})
// check the tunnel connection works
remote.testing.working((err, ok) => {
t.error(err, 'testing.working didnt error')
t.true(ok, 'testing.working is true')
// leave after 5 seconds
setTimeout(() => {
rpc.tunnel.leave().then((ret) => {
console.warn('left')
console.warn(ret)
console.warn('room left... exiting in 10s')
setTimeout(exit, 10000)
}).catch((err) => {
console.warn('left failed')
throw err
})
}, 5000)
})
}) // on rpc:connect
// announce ourselves to the room/tunnel
rpc.tunnel.announce().then((ret) => {


@@ -5,7 +5,10 @@ const { readFileSync } = require('fs')
let newConnections = 0
module.exports = {
secretStackPlugins: ['ssb-conn', 'ssb-room-client'],
secretStackPlugins: [
'ssb-conn',
'ssb-room-client',
],
before: (t, client, ready) => {
// nothing to prepare (like publishing messages, or...)
@@ -14,64 +17,53 @@ module.exports = {
after: (t, client, roomSrvRpc, exit) => {
newConnections++
console.warn('new connection!', roomSrvRpc.id, 'total:', newConnections)
t.comment('client new connection!' + roomSrvRpc.id)
t.comment('total connections:' + newConnections)
if (newConnections > 1) {
console.warn('after call 2 - not exiting')
t.comment('after call 2 - not doing anything')
return
}
// now connected to the room server
// log all new endpoints
pull(
roomSrvRpc.tunnel.endpoints(),
pull.drain(el => {
console.warn("from roomsrv:",el)
t.comment("from roomsrv:" + JSON.stringify(el))
})
)
roomSrvRpc.tunnel.isRoom().then((yes) => {
if (!yes) throw new Error("expected isRoom to be true!")
console.warn("peer is indeed a room!")
// announce ourselves to the room/tunnel
roomSrvRpc.tunnel.announce().then((ret) => {
t.comment('announced!')
// announce ourselves to the room/tunnel
roomSrvRpc.tunnel.announce().then((ret) => {
console.warn('announced!')
// put there by the go test process
let roomHandle = readFileSync('endpoint_through_room.txt').toString()
t.comment("connecting to room handle:", roomHandle)
// put there by the go test process
let roomHandle = readFileSync('endpoint_through_room.txt').toString()
console.warn("connecting to room handle:", roomHandle)
client.conn.connect(roomHandle, (err, tunneldRpc) => {
t.error(err, 'connect through room')
t.comment("got tunnel to:", tunneldRpc.id)
client.conn.connect(roomHandle, (err, tunneldRpc) => {
if (err) throw err
console.warn("got tunnel to:", tunneldRpc.id)
// check the tunnel connection works
tunneldRpc.testing.working((err, ok) => {
t.error(err, 'testing.working didnt error')
t.true(ok, 'testing.working is true')
// check the tunnel connection works
tunneldRpc.tunnel.ping((err, id) => {
if (err) throw err
console.warn("ping:", id)
// start leaving after 2s
setTimeout(() => {
roomSrvRpc.tunnel.leave().then((ret) => {
console.warn('left room... exiting in 3s')
setTimeout(exit, 3000)
}).catch((err) => {
console.warn('left failed')
throw err
})
}, 2000)
})
// start leaving after 2s
setTimeout(() => {
roomSrvRpc.tunnel.leave().then((ret) => {
t.comment('left room... exiting in 3s')
setTimeout(exit, 3000)
}).catch((err) => {
t.error(err, 'left leaving')
})
}, 2000)
})
}).catch((err) => {
console.warn('announce failed')
throw err
})
}).catch((err) => {
console.warn('isRoom failed')
throw err
t.error(err, 'announce on server')
})
}
}


@@ -0,0 +1,20 @@
/*
this testing plugin supplies a very simple method to see if the other side is working
*/
module.exports = {
name: 'testing',
version: '1.0.0',
manifest: {
working: 'async'
},
permissions: {
anonymous: { allow: ['working'] },
},
init(ssb) {
return {
working(cb) {
cb(null, true)
}
};
},
};
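
For context on how this new plugin is exercised: a peer that is connected over muxrpc calls the method through its rpc handle, as the updated client scripts above do. A minimal usage sketch follows; the handle name "remote" is only an assumption for illustration, not part of the commit:

// hypothetical usage sketch: calling the testing plugin from a connected peer
remote.testing.working((err, ok) => {
  if (err) throw err
  console.log('other side is working:', ok) // the plugin always answers true
})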


@@ -4,7 +4,6 @@ package nodejs_test
import (
"bytes"
"context"
"encoding/base64"
"io/ioutil"
"net"
@@ -13,11 +12,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.cryptoscope.co/muxrpc/v2"
"go.cryptoscope.co/netwrap"
"go.cryptoscope.co/secretstream"
"github.com/ssb-ngi-pointer/go-ssb-room/roomdb"
"github.com/ssb-ngi-pointer/go-ssb-room/roomdb/mockdb"
@@ -54,20 +49,20 @@ func TestLegacyJSEndToEnd(t *testing.T) {
time.Sleep(1000 * time.Millisecond)
claire := ts.startJSClient("claire", "./testscripts/legacy_client_opening_tunnel.js",
ts.startJSClient("claire", "./testscripts/legacy_client_opening_tunnel.js",
aliceAddr,
*alice,
)
t.Log("this is claire:", claire.Ref())
t.Log("waiting for process exits")
time.Sleep(45 * time.Second)
// it would be nice to have a signal here to know when the legacy client is done.
time.Sleep(10 * time.Second)
ts.wait()
}
// Two ssb-room clients against a Go server
func TestLegacyJSClient(t *testing.T) {
func TestGoServerLegacyJSClient(t *testing.T) {
// defer leakcheck.Check(t)
r := require.New(t)
@@ -77,6 +72,7 @@ func TestLegacyJSClient(t *testing.T) {
var membersDB = &mockdb.FakeMembersService{}
var aliases = &mockdb.FakeAliasesService{}
srv := ts.startGoServer(membersDB, aliases)
// allow all peers (there aren't any we don't want to allow)
membersDB.GetByFeedReturns(roomdb.Member{Nickname: "free4all"}, nil)
alice := ts.startJSClient("alice", "./testscripts/legacy_client.js",
@@ -94,106 +90,9 @@ func TestLegacyJSClient(t *testing.T) {
srv.Whoami(),
)
time.Sleep(5 * time.Second)
ts.wait()
}
// A Go "client" with a JS ssb-room server and client
func TestLegacyJSServer(t *testing.T) {
// defer leakcheck.Check(t)
r := require.New(t)
a := assert.New(t)
os.RemoveAll("testrun")
ts := newRandomSession(t)
// ts := newSession(t, nil)
// alice is the server now
alice, port := ts.startJSBotAsServer("alice", "./testscripts/legacy_server.js")
aliceAddr := &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: port,
}
// now connect our go client
var membersDB = &mockdb.FakeMembersService{}
var aliasesDB = &mockdb.FakeAliasesService{}
client := ts.startGoServer(membersDB, aliasesDB)
membersDB.GetByFeedReturns(roomdb.Member{Nickname: "free4all"}, nil)
// write the handle to the testrun folder of the bot
handleFile := filepath.Join("testrun", t.Name(), "bob", "endpoint_through_room.txt")
r.NoError(writeRoomHandleFile(*alice, client.Whoami(), handleFile))
// a 2nd js instance but as a client
bob := ts.startJSClient("bob", "./testscripts/legacy_client_opening_tunnel.js",
aliceAddr,
*alice,
)
t.Log("started bob:", bob.Ref())
// connect to the server alice
aliceShsAddr := netwrap.WrapAddr(aliceAddr, secretstream.Addr{PubKey: alice.ID})
ctx, connCancel := context.WithCancel(context.TODO())
err := client.Network.Connect(ctx, aliceShsAddr)
defer connCancel()
r.NoError(err, "connect #1 failed")
time.Sleep(2 * time.Second)
srvEdp, has := client.Network.GetEndpointFor(*alice)
r.True(has, "botA has no endpoint for the server")
t.Log("connected")
// let B listen for changes
newRoomMember, err := srvEdp.Source(ctx, muxrpc.TypeJSON, muxrpc.Method{"tunnel", "endpoints"})
r.NoError(err)
newMemberChan := make(chan string)
// read all the messages from endpoints and throw them over the channel
go func() {
for newRoomMember.Next(ctx) {
body, err := newRoomMember.Bytes()
if err != nil {
panic(err)
}
newMemberChan <- string(body)
}
close(newMemberChan)
}()
// announce A
var ret bool
err = srvEdp.Async(ctx, &ret, muxrpc.TypeJSON, muxrpc.Method{"tunnel", "announce"})
r.NoError(err)
a.False(ret, "would assume these are true but..?")
select {
case <-time.After(3 * time.Second):
t.Error("timeout")
case got := <-newMemberChan:
t.Log("received join?")
t.Log(got)
}
time.Sleep(5 * time.Second)
err = srvEdp.Async(ctx, &ret, muxrpc.TypeJSON, muxrpc.Method{"tunnel", "leave"})
r.NoError(err)
a.False(ret, "would assume these are true but..?")
select {
case <-time.After(3 * time.Second):
t.Error("timeout")
case got := <-newMemberChan:
t.Log("received leave?")
t.Log(got)
}
srvEdp.Terminate()
t.Log("waiting for process exits")
// it would be nice to have a signal here to know when the legacy client is done.
time.Sleep(10 * time.Second)
ts.wait()
}
@@ -206,30 +105,28 @@ func TestModernJSClient(t *testing.T) {
ts := newRandomSession(t)
// ts := newSession(t, nil)
var allowDB = &mockdb.FakeAllowListService{}
var aliasDB = &mockdb.FakeAliasService{}
srv := ts.startGoServer(allowDB, aliasDB)
var membersDB = &mockdb.FakeMembersService{}
var aliasesDB = &mockdb.FakeAliasesService{}
srv := ts.startGoServer(membersDB, aliasesDB)
membersDB.GetByFeedReturns(roomdb.Member{Nickname: "free4all"}, nil)
// allow all peers (there aren't any we don't want to allow in this test)
alice := ts.startJSClient("alice", "./testscripts/modern_client.js",
srv.Network.GetListenAddr(),
srv.Whoami(),
)
srv.Allow(alice, true)
// write the handle to the testrun folder of the bot
handleFile := filepath.Join("testrun", t.Name(), "bob", "endpoint_through_room.txt")
r.NoError(writeRoomHandleFile(srv.Whoami(), alice, handleFile))
time.Sleep(1500 * time.Millisecond)
bob := ts.startJSClient("bob", "./testscripts/modern_client_opening_tunnel.js",
ts.startJSClient("bob", "./testscripts/modern_client_opening_tunnel.js",
srv.Network.GetListenAddr(),
srv.Whoami(),
)
srv.Allow(bob, true)
allowDB.HasFeedReturns(true)
time.Sleep(5 * time.Second)
time.Sleep(15 * time.Second)
ts.wait()
}