When a user is waiting in a lobby and clicks “ask to join”, the moderator is not notified with a prompt to either admit or reject them.
Restarting the prosody service on the meet instance gives intermittent results: sometimes it fixes the “ask to join” knocking issue, other times it breaks the “ask to join” functionality.
Whichever way it lands, the behaviour then stays consistent until the prosody service is restarted again.
This means:
If “ask to join” works: restarting prosody might break it.
If “ask to join” does not work: restarting prosody might fix it.
I have included the prosody start-up logs from when it works and when it does not, and have compared them.
We are hosting Jitsi with haproxy → meet → JVB, all on separate VMs.
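For reference, the lobby feature is wired up in prosody on the meet VM; the standard setup from the self-hosting lobby guide looks roughly like this sketch (placeholder domains, not my exact config):

-- prosody.cfg.lua: typical lobby wiring (sketch only, "meet.domain" is a placeholder)
VirtualHost "meet.domain"
    modules_enabled = {
        -- ...existing modules...
        "muc_lobby_rooms";
    }
    lobby_muc = "lobby.meet.domain"
    main_muc = "conference.meet.domain"

-- MUC component that hosts the lobby rooms
Component "lobby.meet.domain" "muc"
    storage = "memory"
    restrict_room_creation = true
    muc_room_locking = false
    muc_room_default_public_jids = true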
As mentioned above, I have upgraded to version 0.12.0.
I did see this error in the logs: “Jun 06 14:21:01 mod_posix error Failed to daemonize: already-daemonized”. However, after a side-by-side comparison it appears in both start_up_working.txt (knocking works) and startup_not_working.txt (knocking does not work), so it seems unlikely to be the issue?
Update on this! I have found out that this only occurs when moderators join the lobby; non-moderators can use “ask to join” as expected.
I have found this error in my jicofo logs:
java.lang.RuntimeException: Failed to grant owner:
at org.jitsi.impl.protocol.xmpp.ChatRoomImpl.grantOwnership(ChatRoomImpl.java:475)
at org.jitsi.jicofo.xmpp.muc.ChatRoomRoleManager.grantOwner(ChatRoomRoleManager.kt:43)
at org.jitsi.jicofo.xmpp.muc.AuthenticationRoleManager.memberJoined(ChatRoomRoleManager.kt:156)
at org.jitsi.impl.protocol.xmpp.ChatRoomImpl.lambda$processOtherPresence$12(ChatRoomImpl.java:819)
at org.jitsi.utils.event.SyncEventEmitter$fireEvent$1$1.invoke(EventEmitter.kt:64)
at org.jitsi.utils.event.SyncEventEmitter$fireEvent$1$1.invoke(EventEmitter.kt:64)
at org.jitsi.utils.event.BaseEventEmitter.wrap(EventEmitter.kt:49)
at org.jitsi.utils.event.SyncEventEmitter.fireEvent(EventEmitter.kt:64)
at org.jitsi.impl.protocol.xmpp.ChatRoomImpl.processOtherPresence(ChatRoomImpl.java:818)
at org.jitsi.impl.protocol.xmpp.ChatRoomImpl.processPresence(ChatRoomImpl.java:872)
at org.jivesoftware.smackx.muc.MultiUserChat$3.processStanza(MultiUserChat.java:294)
at org.jivesoftware.smack.AbstractXMPPConnection.lambda$invokeStanzaCollectorsAndNotifyRecvListeners$8(AbstractXMPPConnection.java:1626)
at org.jivesoftware.smack.AsyncButOrdered$Handler.run(AsyncButOrdered.java:151)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
at java.base/java.lang.Thread.run(Thread.java:829)
And I am using this community plugin to promote users to moderators.
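For context, such promotion plugins generally hook the occupant-join event in prosody and grant the “owner” affiliation from the prosody side. A hypothetical sketch of that common pattern (not the plugin’s actual code):

-- hypothetical sketch of the usual "promote on join" pattern (not the plugin's real code)
module:hook("muc-occupant-joined", function (event)
    local room, occupant = event.room, event.occupant;
    -- grant "owner" from the prosody side; per the stack trace above, jicofo's
    -- AuthenticationRoleManager also calls grantOwnership when a member joins,
    -- so both sides may be trying to change the same affiliation
    room:set_affiliation(true, occupant.bare_jid, "owner");
end, 2);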
Thank you so much for replying! This is my jicofo log.
I did set enable-auto-owner = "false" to disable auto-owner.
// Source: https://github.com/jitsi/docker-jitsi-meet/issues/825#issuecomment-729837002
jicofo {
  // Configuration related to jitsi-videobridge
  bridge {
    // The maximum number of participants in a single conference to put on one bridge (use -1 for no maximum).
    max-bridge-participants = "-1"
    // The assumed maximum packet rate that a bridge can handle.
    max-bridge-packet-rate = "50000"
    // The assumed average packet rate per participant.
    average-participant-packet-rate-pps = "500"
    // The assumed average stress per participant.
    average-participant-stress = "0.01"
    // The assumed time that an endpoint takes to start contributing fully to the load on a bridge. To avoid allocating
    // a burst of endpoints to the same bridge, the bridge stress is adjusted by adding the number of new endpoints
    // in the last [participant-rampup-time] multiplied by [average-participant-stress].
    participant-rampup-interval = "20 seconds"
    // The stress level above which a bridge is considered overstressed.
    stress-threshold = "0.08"
    // The amount of time to wait before retrying a failed bridge.
    failure-reset-threshold = "1 minute"
    // The bridge selection strategy. The built-in strategies are:
    // SingleBridgeSelectionStrategy: Use the least loaded bridge, do not split a conference between bridges (Octo).
    // SplitBridgeSelectionStrategy: Use a separate bridge for each participant (for testing).
    // RegionBasedBridgeSelectionStrategy: Attempt to put each participant in a bridge in their local region (i.e. use
    // Octo for geo-location).
    // IntraRegionBridgeSelectionStrategy: Use additional bridges when a bridge becomes overloaded (i.e. use Octo for
    // load balancing).
    //
    // Additionally, you can use the fully qualified class name for custom BridgeSelectionStrategy implementations.
    selection-strategy = "SingleBridgeSelectionStrategy"
    health-checks {
      // Whether jicofo should perform periodic health checks to the connected bridges.
      enabled = true
      // The interval at which to perform health checks.
      interval = "10 seconds"
      // When a health check times out, jicofo will retry and only consider it failed after the retry fails. This
      // configures the delay between the original health check timing out and the second health check being sent.
      // It is a duration and defaults to half the [interval].
      # retry-delay = 5 seconds
    }
    // The JID of the MUC to be used as a brewery for bridge instances.
    brewery-jid = "JvbBrewery@internal.auth.meet.domain"
  }
  // Configure the codecs and RTP extensions to be used in the offer sent to clients.
  codec {
    video {
      vp8 {
        enabled = true
        pt = 100
        // Payload type for the associated RTX stream. Set to -1 to disable RTX.
        rtx-pt = 96
      }
      # vp9 {
      #   enabled = true
      #   pt = 101
      #   // Payload type for the associated RTX stream. Set to -1 to disable RTX.
      #   rtx-pt = 97
      # }
      # h264 {
      #   enabled = true
      #   pt = 107
      #   // Payload type for the associated RTX stream. Set to -1 to disable RTX.
      #   rtx-pt = 99
      # }
    }
    audio {
      # isac-16000 {
      #   enabled = true
      #   pt = 103
      # }
      # isac-32000 {
      #   enabled = true
      #   pt = 104
      # }
      opus {
        enabled = true
        pt = 111
        minptime = 10
        use-inband-fec = true
        red {
          enabled = false
          pt = 112
        }
      }
      # telephone-event {
      #   enabled = true
      #   pt = 126
      # }
    }
    // RTP header extensions (RTP: Real-time Transport Protocol, an application-layer protocol carried over UDP).
    rtp-extensions {
      audio-level {
        enabled = true
        id = 1
      }
      tof {
        // TOF is currently disabled, because we don't support it in the bridge
        // (and currently clients seem to not use it when abs-send-time is
        // available).
        enabled = false
        id = 2
      }
      abs-send-time {
        enabled = true
        id = 3
      }
      rid {
        enabled = false
        id = 4
      }
      tcc {
        enabled = true
        id = 5
      }
      video-content-type {
        enabled = false
        id = 7
      }
      framemarking {
        enabled = false
        id = 9
      }
    }
  }
  conference {
    // Whether to automatically grant the 'owner' role to the first participant in the conference (and subsequently to
    // the next in line when the current owner leaves).
    enable-auto-owner = "false"
    // How long to wait for the initial participant in a conference.
    initial-timeout = "15 seconds"
    // Whether jicofo should inject a random SSRC for endpoints which don't advertise any SSRCs. This is a temporary
    // workaround for an issue with signaling endpoints for Octo.
    // SSRC: synchronization source identifier; it uniquely identifies the source of a stream.
    inject-ssrc-for-recv-only-endpoints = false
    max-ssrcs-per-user = 20
    // How long a participant's media session will be kept alive once it remains the only participant in the room.
    single-participant-timeout = 20 seconds
  }
  // Configuration for the internal health checks performed by jicofo.
  health {
    // Whether to perform health checks.
    enabled = true
    // The interval between health checks. If set to 0, periodic health checks will not be performed.
    interval = "10 seconds"
    # The timeout for a health check.
    timeout = "30 seconds"
    # If performing a health check takes longer than this, it is considered unsuccessful.
    max-check-duration = "20 seconds"
    # The prefix to use when creating MUC rooms for the purpose of health checks.
    room-name-prefix = "__jicofo-health-check"
  }
  jibri {
    // The JID of the MUC to be used as a brewery for jibri instances for streaming.
    brewery-jid = "jibribrewery@internal.auth.meet.domain"
    // How many times to retry a given Jibri request before giving up. Set to -1 to allow infinite retries.
    num-retries = "5"
    // How long to wait for Jibri to start recording from the time it accepts a START request.
    pending-timeout = "90"
  }
  rest {
    host = "localhost"
    port = 8888
  }
  task-pools {
    shared-pool-max-threads = 1500
  }
  xmpp: {
    client: {
      enabled = true
      hostname = "localhost"
      domain = "auth.meet.domain"
      username = "focus"
      password = "85v1IDo6u1XdC2wS"
      conference-muc-jid = "conference.meet.domain"
      disable-certificate-verification = true
      client-proxy: focus.meet.domain
    }
    trusted-domains: [ "recorder.meet.domain" ]
  }
  authentication: {
    enabled: true
    type: JWT
    login-url: meet.domain
  }
  octo {
    // Whether or not to use Octo. Note that when enabled, its use will be determined by
    // $jicofo.bridge.selection-strategy. There's a corresponding flag in the JVB and these
    // two MUST be in sync (otherwise bridges will crash because they won't know how to
    // deal with octo channels).
    enabled = false
    id = "1"
  }
}
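Since the authentication block above is set to JWT, the matching prosody side is normally wired with the token plugin; a sketch with placeholder values (not my real app_id/secret):

-- prosody.cfg.lua: token (JWT) authentication on the main virtual host (sketch, placeholder values)
VirtualHost "meet.domain"
    authentication = "token"
    app_id = "example_app_id"         -- must match the issuer/app_id the JWTs are signed with
    app_secret = "example_app_secret" -- shared secret used to validate incoming tokens
    allow_empty_token = false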