Initial commit

Mid 2024-12-21 22:43:09 +02:00
commit 6f26dced64
8 changed files with 23533 additions and 0 deletions

36
Makefile Normal file

@@ -0,0 +1,36 @@
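# Two artifacts come out of this Makefile:
#   wsrA       - the native WebSocket relay, built from main.c + mongoose.c
#   support.js - the Emscripten-compiled decoder (FFmpeg VP8/VP9 + libvorbis) loaded by blarfwork.js
# The llvm-ar steps strip log2_tab.o and reverse.o from the FFmpeg archives because those
# objects are duplicated across the static libraries and would collide at link time.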
.DEFAULT_GOAL := all
EMSCR := $(shell command -v emcmake 2> /dev/null)
LLVM_AR := $(shell command -v llvm-ar 2> /dev/null)

emscr:
ifndef EMSCR
	$(error "Emscripten is not in PATH.")
endif
ifndef LLVM_AR
	$(error "llvm-ar is not in PATH. Consider Emscripten's own binary in emsdk/upstream/bin.")
endif

ogg/libogg.a: emscr
	test -e ogg || git clone https://github.com/xiph/ogg
	cd ogg && emcmake cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_FLAGS="-sMAYBE_WASM2JS -fPIC" ./ && make -j$(nproc)

vorbis/lib/libvorbis.a: emscr ogg/libogg.a
	test -e vorbis || git clone https://github.com/xiph/vorbis
	cd vorbis && emcmake cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_FLAGS="-sMAYBE_WASM2JS -I../ogg/include/ -fPIC" -DOGG_INCLUDE_DIR="../ogg/include" -DOGG_LIBRARY="../libogg.a" -DBUILD_TESTING=0 ./ && make -j$(nproc)

FFmpeg/libswscale/libswscale.a: emscr
	test -e FFmpeg || git clone https://github.com/FFmpeg/FFmpeg/
	cd FFmpeg && git checkout 13129f1af4340944291e0e291cb38d1c1ea3aad2 && ./configure --cc=emcc --enable-cross-compile --target-os=none --arch=x86 --disable-runtime-cpudetect --disable-asm --disable-bzlib --disable-iconv --disable-libxcb --disable-lzma --disable-sdl2 --disable-securetransport --disable-xlib --disable-zlib --disable-network --disable-d3d11va --disable-dxva2 --disable-all --disable-everything --disable-ffmpeg --enable-avcodec --disable-avformat --enable-avutil --disable-avfilter --disable-swresample --enable-swscale --enable-decoder=vp8 --enable-decoder=vp9 --enable-shared --extra-cflags="-sMAYBE_WASM2JS -fPIC" --extra-cxxflags="-sMAYBE_WASM2JS -fPIC" && make -j$(nproc)
	llvm-ar d FFmpeg/libswscale/libswscale.a log2_tab.o
	llvm-ar d FFmpeg/libavutil/libavutil.a log2_tab.o
	llvm-ar d FFmpeg/libavcodec/libavcodec.a reverse.o

wsrA: main.c mongoose.c
	cc -s -O3 -D_GNU_SOURCE -o wsrA main.c mongoose.c

support.js: emscr ogg/libogg.a vorbis/lib/libvorbis.a FFmpeg/libswscale/libswscale.a
	emcc -o support -fPIC -flto -IFFmpeg -Iogg/include -Ivorbis/include -LFFmpeg/libavcodec -l:libavcodec.a -LFFmpeg/libswscale -l:libswscale.a -LFFmpeg/libavutil -l:libavutil.a -Lvorbis/lib -l:libvorbis.a -Logg -l:libogg.a support.c -pthread -msimd128 -O3 -sMAYBE_WASM2JS -sUSE_PTHREADS=1 -sEXPORT_ALL=1 -sMAIN_MODULE=1 -sTOTAL_MEMORY=128MB
	mv support support.js

all: support.js wsrA

406
blarf.js Normal file

@@ -0,0 +1,406 @@
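// Player pipeline: a WebSocket delivers a live Matroska (WebM) stream; EBMLParser
// incrementally splits it into elements, MatroskaState interprets track headers and
// SimpleBlocks, compressed packets are handed to blarfwork.js for decoding, and the
// decoded frames/samples come back through VideoQueue/AudioQueue for the canvas and
// audio output.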
(function() {
var VideoQueue = []
var AudioQueue = []
var BlarfEl = document.getElementById("BLARF")
BlarfEl.innerHTML = `
<canvas width="1280" height="720"></canvas>
<div class="MKVControls">
<div class="MKVSpeaker"><span class="MKVSpeakerOff">🔈&#xFE0E;</span><span class="MKVSpeakerOn" style="display:none;">🔊&#xFE0E;</span></div>
<span class="MKVCurrentTime">00:00:00</span>
<span class="MKVStats"></span>
</div>
`
var Canvus = BlarfEl.querySelector("canvas")
var CanvCtx = Canvus.getContext("2d")
var AudCtx
var AudScript
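// Playback pulls from AudioQueue through a ScriptProcessorNode. The node is
// deprecated in favour of AudioWorklet, but it is simple and widely supported.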
function create_audio(hz, channels) {
if(AudCtx) {
AudCtx.close()
}
AudCtx = new AudioContext({sampleRate: hz})
AudScript = AudCtx.createScriptProcessor(1024, 2, 2)
AudScript.onaudioprocess = function(e) {
var outL = e.outputBuffer.getChannelData(0)
var outR = channels > 1 ? e.outputBuffer.getChannelData(1) : null
var leftToWrite = outL.length
var offset = 0
while(AudioQueue.length && leftToWrite) {
var amount = Math.min(leftToWrite, AudioQueue[0].left.length)
outL.set(AudioQueue[0].left.subarray(0, amount), offset)
if(outR) outR.set(AudioQueue[0].right.subarray(0, amount), offset)
AudioQueue[0].left = AudioQueue[0].left.subarray(amount)
if(outR) AudioQueue[0].right = AudioQueue[0].right.subarray(amount)
if(AudioQueue[0].left.length == 0) {
AudioQueue.shift()
}
leftToWrite -= amount
offset += amount
}
if(RenderStartTime && leftToWrite) {
buffering(1000)
}
}
AudScript.connect(AudCtx.destination)
}
var LastControlsInterrupt
function interruptcontrols() {
LastControlsInterrupt = document.timeline.currentTime
}
interruptcontrols()
function togglemute() {
if(AudCtx)
if(document.querySelector(".MKVSpeakerOn").style.display == "none") {
AudCtx.resume()
} else {
AudCtx.suspend()
}
document.querySelectorAll(".MKVSpeaker *").forEach(function(el) { el.style.display = el.style.display == "none" ? "" : "none" })
interruptcontrols()
}
document.querySelector(".MKVSpeaker").onclick = togglemute
document.onkeypress = function(e) {
if(e.key.toUpperCase() == "M") {
togglemute()
}
}
BlarfEl.onmousemove = function() {
interruptcontrols()
}
var RenderStartTime, VideoStartTime
var Statistics = {}
var TheWorker = new Worker("blarfwork.js")
TheWorker.onmessage = function(e) {
if(e.data.width) {
var imgData = new ImageData(new Uint8ClampedArray(e.data.data.buffer), e.data.width, e.data.height, {colorSpace: "srgb"})
VideoQueue.push({t: e.data.t, imgData: imgData})
} else if(e.data.samples) {
AudioQueue.push({left: e.data.left, right: e.data.right || e.data.left})
// Audio may be loaded but it might not play because of autoplay permissions
// In this case the audio queue will fill up and cause ever-increasing AV desync
// To prevent this, manually crop the audio to the duration in the video queue
if(AudCtx && AudCtx.state != "running") {
var durationInAudioQueue = AudioQueue.length ? AudioQueue.reduce((acc, el) => acc + el.left.length, 0) : 0
var durationToRemove = Math.max(durationInAudioQueue - (VideoQueue.length ? (VideoQueue[VideoQueue.length - 1].t - VideoQueue[0].t) : 0) * 48, 0)
while(AudioQueue.length && durationToRemove) {
var amount = Math.min(durationToRemove, AudioQueue[0].left.length)
AudioQueue[0].right = AudioQueue[0].right.subarray(amount)
AudioQueue[0].left = AudioQueue[0].left.subarray(amount)
if(AudioQueue[0].left.length == 0) {
AudioQueue.shift()
}
durationToRemove -= amount
}
}
}
if(!Statistics[e.data.id]) {
Statistics[e.data.id] = {sum: 0, count: 0}
}
Statistics[e.data.id].sum += e.data.taken
Statistics[e.data.id].count++
var stats = document.querySelector(".MKVStats")
if(stats) {
/* var text = ""
for(var k in Statistics) {
text = text + k + ":" + (Math.floor(100 * Statistics[k].sum / Statistics[k].count) / 100) + ","
}
stats.innerText = text*/
stats.innerHTML = (VideoQueue.length ? (VideoQueue[VideoQueue.length - 1].t - VideoQueue[0].t) : "0") + "v" + (AudioQueue.reduce(function(acc, obj) {return acc + obj.left.length / 48}, 0)|0) + "a"
}
}
Canvus.onclick = function() {
if(AudCtx) AudCtx.resume()
}
var VideoBufferingOffset = 0
function buffering(millis) {
//var silence = new Float32Array(millis * 48);
//AudioQueue.push({left: silence, right: silence})
//VideoBufferingOffset += millis
}
function toHex(buffer) {
return Array.prototype.map.call(buffer, x => ('00' + x.toString(16)).slice(-2)).join('');
}
function pad(str, n, z) {
z = z || '0'
str = str + ''
while(str.length < n) {
str = z + str
}
return str
}
class EBMLParser {
Accum = new Uint8Array([])
I = 0
IdStack = []
SizeStack = []
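// EBML varints encode their own length: the count of leading zero bits in the
// first byte gives the number of extra bytes that follow (0x8x is 1 byte total,
// 0x4x is 2 bytes, and so on). Math.clz32 of a byte value is 24 when the top
// bit is set, hence "clz32 - 23" bytes overall.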
get_varint() {
if(this.Accum.length == 0) return null;
var bytes = Math.clz32(this.Accum[this.I]) - 23
if(this.Accum.length - this.I < bytes) return null;
var ret = this.Accum.subarray(this.I, this.I + bytes).slice(0)
this.I += bytes
return ret
}
poosh(toAdd) {
var a = this.Accum
this.Accum = new Uint8Array(a.length + toAdd.length)
this.Accum.set(a)
this.Accum.set(toAdd, a.length)
}
parse() {
do {
var IOld = this.I
var elID = this.get_varint()
if(elID === null) {
this.I = IOld
break
}
elID = EBMLParser.vi_to_i(elID)
var elSize = this.get_varint()
if(elSize === null) {
this.I = IOld
break
}
EBMLParser.parse_varint(elSize)
elSize = EBMLParser.vi_to_i(elSize)
if(elID == 0x18538067 || elID == 0x114D9B74 || elID == 0x1549A966 || elID == 0x1F43B675 || elID == 0x1654AE6B || elID == 0xE0 || elID == 0xE1 || elID == 0xAE) {
// tree
this.IdStack.push(elID)
this.SizeStack.push(elSize + (this.I - IOld))
if(this.onenter) {
this.onenter(elID)
}
} else {
// binary
if(this.Accum.length - this.I >= elSize) {
if(this.ondata) {
this.ondata(elID, this.Accum.subarray(this.I, this.I + elSize))
}
this.I += elSize
} else {
this.I = IOld
break
}
}
for(var i = 0; i < this.IdStack.length; i++) {
this.SizeStack[i] -= this.I - IOld
}
while(this.SizeStack.length && this.SizeStack[this.SizeStack.length - 1] <= 0) {
if(this.SizeStack[this.SizeStack.length - 1] < 0) console.log("EBML element overran its parent")
if(this.onexit) {
this.onexit(this.IdStack[this.IdStack.length - 1])
}
this.SizeStack.pop()
this.IdStack.pop()
}
} while(true);
this.Accum = this.Accum.subarray(this.I)
this.I = 0
}
static parse_varint(vi) {
vi[0] = vi[0] & ((1 << (31 - Math.clz32(vi[0]))) - 1)
}
static vi_to_i(vi) {
var ret = 0
for(var i = 0; i < vi.length; i++) {
ret = ret * 256 + vi[i]
}
return ret
}
}
class MatroskaState {
tracks = []
onenter(elID) {
if(elID == 0xAE) {
// Track Entry
this.tracks.push({})
} else if(elID == 0xE0) {
// Track Entry -> Track Video
this.tracks[this.tracks.length - 1].type = "video"
} else if(elID == 0xE1) {
// Track Entry -> Track Audio
this.tracks[this.tracks.length - 1].type = "audio"
}
}
ondata(elID, data) {
if(elID == 0xD7) {
// Track Entry -> Track Number
this.tracks[this.tracks.length - 1].id = EBMLParser.vi_to_i(data)
} else if(elID == 0xB0) {
// Track Entry -> Track Video -> Width
this.tracks[this.tracks.length - 1].width = EBMLParser.vi_to_i(data)
} else if(elID == 0xBA) {
// Track Entry -> Track Video -> Height
this.tracks[this.tracks.length - 1].height = EBMLParser.vi_to_i(data)
} else if(elID == 0x9F) {
// Track Entry -> Track Audio -> Channels
this.tracks[this.tracks.length - 1].channels = EBMLParser.vi_to_i(data)
} else if(elID == 0xB5) {
// Track Entry -> Track Audio -> Sampling Frequency
var dv = new DataView(data.slice(0).buffer)
this.tracks[this.tracks.length - 1].samplerate = data.length == 4 ? dv.getFloat32(0, false) : dv.getFloat64(0, false)
} else if(elID == 0x86) {
// Track Entry -> Codec Type
this.tracks[this.tracks.length - 1].codec = new TextDecoder().decode(data);
} else if(elID == 0x63A2) {
// Track Entry -> Codec Private
this.tracks[this.tracks.length - 1].priv = data.slice(0)
} else if(elID == 0xE7) {
// Cluster -> Timestamp
this.currentClusterTime = EBMLParser.vi_to_i(data)
if(!RenderStartTime) {
RenderStartTime = document.timeline.currentTime + 1000
}
if(!VideoStartTime) {
VideoStartTime = this.currentClusterTime
}
} else if(elID == 0xA3) {
// Cluster -> SimpleBlock
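// SimpleBlock layout: track number varint, 2-byte big-endian timestamp relative
// to the cluster, 1 flags byte (bit 7 = keyframe), then the codec packet.
// "data[0] & 127" assumes the track number fits in a single-byte varint.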
var trackID = data[0] & 127
var track = this.tracks.find(function(t) {return t.id == trackID})
var timestamp = data[1] * 256 + data[2]
var flags = data[3]
var kf = !!(flags & 128)
var TotalTime = (this.currentClusterTime + timestamp) / 1000
document.querySelector(".MKVCurrentTime").innerText = pad(Math.floor(TotalTime / 3600), 2) + ":" + pad(Math.floor(TotalTime / 60 % 60), 2) + ":" + pad(Math.floor(TotalTime % 60), 2)
if(track) {
var packet = data.subarray(4)
TheWorker.postMessage({cmd: "decode", id: trackID, t: timestamp + this.currentClusterTime - VideoStartTime, packet: packet, kf: kf})
}
}
}
onexit(elID) {
if(elID == 0xAE) {
// Track Entry
var track = this.tracks[this.tracks.length - 1]
var codec = track.codec
var id = track.id
var priv = track.priv
var channels = track.channels // undefined if not audio
TheWorker.postMessage({cmd: "create", codec: codec, id: id, priv: priv, channels: channels})
if(track.type == "video") {
Canvus.width = track.width
Canvus.height = track.height
} else {
create_audio(track.samplerate, track.channels)
}
}
}
}
var matr = new MatroskaState()
var ebml = new EBMLParser()
ebml.onenter = matr.onenter.bind(matr)
ebml.ondata = matr.ondata.bind(matr)
ebml.onexit = matr.onexit.bind(matr)
function reconnect_ws() {
var ws = new WebSocket(BlarfEl.getAttribute("data-target"))
ws.binaryType = "arraybuffer"
ws.onmessage = function(ev) {
ebml.poosh(new Uint8Array(ev.data))
}
ws.onclose = function(ev) {
setTimeout(reconnect_ws, 5000)
}
}
reconnect_ws()
function render(timestamp) {
ebml.parse()
document.querySelector(".MKVControls").style.opacity = Math.max(0, Math.min(1, 5 - (timestamp - LastControlsInterrupt) / 1000))
while(RenderStartTime && VideoQueue.length && VideoQueue[0].t + VideoBufferingOffset <= (timestamp - RenderStartTime)) {
CanvCtx.putImageData(VideoQueue[0].imgData, 0, 0)
VideoQueue.shift()
}
requestAnimationFrame(render)
}
requestAnimationFrame(render)
})()

98
blarfwork.js Normal file

@@ -0,0 +1,98 @@
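// Decoder worker: receives {cmd: "create"} to instantiate a codec and
// {cmd: "decode"} with raw Matroska packets, runs the Emscripten-compiled
// FFmpeg/libvorbis code from support.js, and posts decoded RGBA frames or
// float PCM back to blarf.js.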
var Initialized = false
var Module = {
onRuntimeInitialized: function() {
Initialized = true
respond_to_messages()
}
}
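// Reads a little-endian integer from a byte slice of the wasm heap; used to
// pull pointers and struct fields out of the C-side structs at known offsets.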
function vi_to_i_le(vi) {
var ret = 0
for(var i = vi.length - 1; i >= 0; i--) {
ret = ret * 256 + vi[i]
}
return ret
}
var Codecs = {}
var ReceivedMessages = []
function respond_to_messages() {
if(!Initialized) {
return
}
while(ReceivedMessages.length) {
var msg = ReceivedMessages[0]
if(msg.cmd == "create") {
if(msg.codec == "V_VP8") {
var ptr = Module._alloc_codec_vpx(0);
Codecs[msg.id] = {type : msg.codec, ptr : ptr}
} else if(msg.codec == "V_VP9") {
var ptr = Module._alloc_codec_vpx(1);
Codecs[msg.id] = {type : msg.codec, ptr : ptr}
} else if(msg.codec == "A_VORBIS") {
var privptr = Module._av_mallocz(msg.priv.length)
Module.HEAPU8.set(msg.priv, privptr)
Codecs[msg.id] = {type: msg.codec, ptr: Module._alloc_codec_vorbis(privptr, msg.priv.length), channels: msg.channels}
Module._av_free(privptr)
}
} else if(msg.cmd == "decode" && Codecs[msg.id]) {
var packet = Module._av_mallocz(msg.packet.length)
Module.HEAPU8.set(msg.packet, packet)
if(Codecs[msg.id].type == "V_VP8" || Codecs[msg.id].type == "V_VP9") {
var startTime = performance.now()
var status = Module._codec_vpx_push_packet(Codecs[msg.id].ptr, +msg.kf, packet, msg.packet.length)
var taken = performance.now() - startTime
if(status) {
var rgbaPtr = vi_to_i_le(Module.HEAPU8.subarray(Codecs[msg.id].ptr + 0, Codecs[msg.id].ptr + 4));
var w = vi_to_i_le(Module.HEAPU8.subarray(Codecs[msg.id].ptr + 4, Codecs[msg.id].ptr + 6));
var h = vi_to_i_le(Module.HEAPU8.subarray(Codecs[msg.id].ptr + 6, Codecs[msg.id].ptr + 8));
postMessage({t: msg.t, id: msg.id, width: w, height: h, data: Module.HEAPU8.subarray(rgbaPtr, rgbaPtr + 4 * w * h).slice(0), taken: taken})
}
} else if(Codecs[msg.id].type == "A_VORBIS") {
var startTime = performance.now()
var samples = Module._codec_vorbis_push_packet(Codecs[msg.id].ptr, packet, msg.packet.length)
var sampleChannelsPtr = vi_to_i_le(Module.HEAPU8.subarray(Codecs[msg.id].ptr, Codecs[msg.id].ptr + 4))
var sampleChannelLPtr = vi_to_i_le(Module.HEAPU8.subarray(sampleChannelsPtr + 0, sampleChannelsPtr + 4));
var sampleChannelRPtr = Codecs[msg.id].channels == 2 ? vi_to_i_le(Module.HEAPU8.subarray(sampleChannelsPtr + 4, sampleChannelsPtr + 8)) : undefined;
var left = Module.HEAPF32.subarray(sampleChannelLPtr / 4, sampleChannelLPtr / 4 + samples).slice(0)
var right = sampleChannelRPtr && Module.HEAPF32.subarray(sampleChannelRPtr / 4, sampleChannelRPtr / 4 + samples).slice(0)
postMessage({t: msg.t, id: msg.id, samples: samples, left: left, right: right, taken: performance.now() - startTime}, right ? [left.buffer, right.buffer] : [left.buffer])
}
Module._av_free(packet)
}
ReceivedMessages.shift()
}
}
onmessage = function(e) {
ReceivedMessages.push(e.data)
respond_to_messages()
}
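// support.js is loaded last so that the onmessage handler above is installed
// first; anything arriving before onRuntimeInitialized fires is queued in
// ReceivedMessages and drained afterwards.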
importScripts("support.js")

121
index.html Normal file

@@ -0,0 +1,121 @@
<!DOCTYPE html>
<html>
<head>
<style>
@media(prefers-color-scheme: dark) {
body {
background: black;
}
}
div.everything {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
min-width: 95vw;
}
div.stream {
display: flex;
flex-direction: row;
}
div.stream > div#BLARF {
}
div.stream > div.chat {
flex-grow: 1;
}
div#BLARF {
position: relative;
font-family: sans-serif;
width: fit-content;
}
div#BLARF .MKVControls {
position: absolute;
bottom: 0.5%;
left: 0%;
margin-left: 0;
right: 0%;
width: 100%;
color: white;
font-size: 0.4cm;
background: rgb(0, 0, 0);
background: linear-gradient(0deg, rgba(0, 0, 0, 1) 0%, rgba(0, 0, 0, 0) 100%);
}
div#BLARF .MKVControls > * {
vertical-align: middle;
}
div#BLARF .MKVSpeaker {
width: 1em;
text-align: center;
display: inline-block;
cursor: pointer;
font-size: 0.75cm;
}
div#BLARF > canvas {
background: url(Intermission2.jpg) black;
background-position: 0 30%;
background-size: cover;
width: 100%;
}
/* Pretty bad hack, but this way Converse.js can be unchanged */
div.chatbox-title__text {
text-indent: -99999px;
line-height: 0 !important;
}
div.chatbox-title__text::after {
content: "Chat";
text-indent: 0px;
display: block;
line-height: initial;
}
</style>
<link rel="stylesheet" type="text/css" media="screen" href="https://cdn.conversejs.org/10.1.4/dist/converse.min.css">
<meta charset="UTF-8" />
<title>Title</title>
</head>
<body>
<div class="everything">
<header></header>
<div class="stream">
<div class="feed">
<div id="BLARF" data-target="wss://iki.mid.net.ua/streamout/"></div>
</div>
<div class="chat">
<converse-root style="position: relative;"></converse-root>
</div>
</div>
<footer></footer>
</div>
<script src="https://cdn.conversejs.org/10.1.4/dist/converse.min.js" charset="utf-8"></script>
<script src="blarf.js"></script>
<script>
function randomHex(size) {
return [...self.crypto.getRandomValues(new Uint8Array(size))].map(b=>b.toString(16).padStart(2, "0")).join("");
}
const un = 'lol' + randomHex(16)
converse.initialize({
view_mode: 'embedded',
websocket_url: 'wss://mid.net.ua/xmpp',
login: 'anonymous',
jid: un + '@anon.mid.net.ua',
auto_login: true,
password: 'lol',
auto_join_rooms: ['stream@muc.anon.mid.net.ua'],
show_message_avatar: false,
show_controlbox_by_default: false,
roster_groups: false,
blacklisted_plugins: ['converse-controlbox', 'converse-fullscreen'],
singleton: true,
discover_connection_methods: false,
keepalive: false,
auto_reconnect: true
})
</script>
</body>
</html>

167
main.c Normal file

@@ -0,0 +1,167 @@
#include"mongoose.h"
#include<getopt.h>
#include<stdio.h>
#include<string.h>
#include<stdbool.h>
typedef enum {
LOADING_HEADER,
STREAMING,
} State;
static const uint8_t *STATE_CHANGE_STRING[] = {
[LOADING_HEADER] = (const uint8_t *) "\x1F\x43\xB6\x75",
[STREAMING] = (const uint8_t *) "\x1A\x45\xDF\xA3",
};
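/*
 * Relay state machine: while STREAMING, scan the ingest TCP stream for the EBML
 * header magic 0x1A45DFA3, which marks the start of a new Matroska stream; then
 * buffer bytes (LOADING_HEADER) until the first Cluster (0x1F43B675). The buffered
 * header is replayed to every WebSocket client that connects mid-stream.
 */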
static State state = STREAMING;
static int stateChangeIdx;
static char *header;
static size_t headerSize;
static struct mg_connection *streamerConnected = NULL;
static struct {
char *wslisten;
char *tcplisten;
char *tlscert;
char *tlsca;
} settings;
static void ws_broadcast(struct mg_mgr *mgr, const char *data, size_t len) {
for(struct mg_connection *cli = mgr->conns; cli; cli = cli->next) {
if(cli->is_websocket) {
mg_ws_send(cli, data, len, WEBSOCKET_OP_BINARY);
}
}
}
static void fn(struct mg_connection *c, int ev, void *ev_data) {
if(ev == MG_EV_ACCEPT) {
if(mg_url_is_ssl(c->is_websocket ? settings.wslisten : settings.tcplisten)) {
struct mg_tls_opts opts = {.ca = mg_unpacked(settings.tlsca), .cert = mg_unpacked(settings.tlscert), .key = mg_unpacked(settings.tlscert)};
mg_tls_init(c, &opts);
}
} else if(ev == MG_EV_CLOSE) {
if(c == streamerConnected) {
streamerConnected = NULL;
}
} else if(ev == MG_EV_HTTP_MSG) {
struct mg_http_message *hm = (struct mg_http_message *) ev_data;
mg_ws_upgrade(c, hm, NULL);
} else if(ev == MG_EV_WS_OPEN) {
if(state == STREAMING && header) {
mg_ws_send(c, header, headerSize, WEBSOCKET_OP_BINARY);
}
} else if(ev == MG_EV_WS_MSG) {
// Incoming WS messages are ignored.
} else if(ev == MG_EV_READ) {
if(!c->is_websocket) {
if(streamerConnected && streamerConnected != c) {
c->is_closing = 1;
} else {
streamerConnected = c;
}
} else return;
struct mg_iobuf *r = &c->recv;
if(state == LOADING_HEADER) {
header = realloc(header, headerSize + r->len);
memcpy(header + headerSize, r->buf, r->len);
headerSize += r->len;
char *clusterEl = memmem(header, headerSize, "\x1F\x43\xB6\x75", 4);
if(clusterEl) {
ws_broadcast(c->mgr, header, clusterEl - header);
ws_broadcast(c->mgr, clusterEl, header + headerSize - clusterEl);
headerSize = clusterEl - header;
state = STREAMING;
}
} else {
int i;
for(i = 0; i < r->len; i++) {
if(r->buf[i] == STATE_CHANGE_STRING[state][stateChangeIdx]) {
stateChangeIdx++;
if(stateChangeIdx == strlen((const char *) STATE_CHANGE_STRING[state])) {
i++;
stateChangeIdx = 0;
state = LOADING_HEADER;
break;
}
} else {
stateChangeIdx = 0;
}
}
if(state == LOADING_HEADER) {
if(i > 4) {
ws_broadcast(c->mgr, r->buf, i - 4);
}
header = realloc(header, headerSize = 4 + (r->len - i));
memcpy(header, STATE_CHANGE_STRING[STREAMING], 4);
memcpy(header + 4, r->buf + i, r->len - i);
} else {
ws_broadcast(c->mgr, r->buf, r->len);
}
}
r->len = 0;
}
}
int main(int argc, char **argv) {
int help = 0, err = 0;
int c;
while((c = getopt(argc, argv, "a:c:i:o:h")) != -1) {
if(c == 'i') {
settings.tcplisten = optarg;
} else if(c == 'o') {
settings.wslisten = optarg;
} else if(c == 'a') {
settings.tlsca = optarg;
} else if(c == 'c') {
settings.tlscert = optarg;
} else if(c == 'h') {
help = 1;
}
}
if(help) {
fprintf(stderr, "Example usage: %s [-c /path/to/cert.pem] [-a /path/to/certauthority.pem] [-h] <-i tcp://[::]:1234> <-o ws://[::]:8000>\n", argv[0]);
return 0;
}
if(!settings.wslisten) {
fputs("Missing -o parameter. Try -h for help.\n", stderr);
err = 1;
}
if(!settings.tcplisten) {
fputs("Missing -i parameter. Try -h for help\n", stderr);
err = 1;
}
if(err) {
return err;
}
struct mg_mgr mgr;
mg_mgr_init(&mgr);
mg_listen(&mgr, settings.tcplisten, fn, NULL);
mg_http_listen(&mgr, settings.wslisten, fn, NULL);
for (;;) mg_mgr_poll(&mgr, 1000);
mg_mgr_free(&mgr);
return 0;
}

19313
mongoose.c Normal file

File diff suppressed because it is too large.

3220
mongoose.h Normal file

File diff suppressed because it is too large.

172
support.c Normal file

@@ -0,0 +1,172 @@
#include<stdlib.h>
#include<libavcodec/avcodec.h>
#include<libswscale/swscale.h>
#include<libavutil/imgutils.h>
#include<ogg/ogg.h>
#include<vorbis/codec.h>
struct veepie {
uint8_t *rgba;
uint16_t w;
uint16_t h;
uint16_t wOld, hOld;
const AVCodec *cdc;
AVCodecContext *ctx;
AVFrame *frame1, *frame2;
AVPacket pkt;
struct SwsContext *sws;
};
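/* blarfwork.js reads this struct straight out of the wasm heap, so the layout
 * is load-bearing: on wasm32 the rgba pointer sits at offset 0 (4 bytes), w at
 * offset 4 and h at offset 6 (uint16_t each). */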
struct veepie *alloc_codec_vpx(int vp9) {
struct veepie *v = calloc(1, sizeof(*v));
v->cdc = avcodec_find_decoder_by_name(vp9 ? "vp9" : "vp8");
if(!v->cdc) return (void*) (uintptr_t) 1;
v->ctx = avcodec_alloc_context3(v->cdc);
if(!v->ctx) return (void*) (uintptr_t) 2;
if(avcodec_open2(v->ctx, v->cdc, 0) < 0) return (void*) (uintptr_t) 3;
v->frame1 = av_frame_alloc();
v->frame2 = av_frame_alloc();
return v;
}
int codec_vpx_push_packet(struct veepie *v, int kf, uint8_t *data, size_t len) {
av_init_packet(&v->pkt);
v->pkt.data = data;
v->pkt.size = len;
v->pkt.pts = AV_NOPTS_VALUE;
v->pkt.dts = AV_NOPTS_VALUE;
if(kf) v->pkt.flags |= AV_PKT_FLAG_KEY;
avcodec_send_packet(v->ctx, &v->pkt);
av_packet_unref(&v->pkt);
int retframe = avcodec_receive_frame(v->ctx, v->frame1);
if(retframe) {
return 0;
}
v->w = v->frame1->width;
v->h = v->frame1->height;
if(v->w != v->wOld || v->h != v->hOld) {
v->sws = sws_getContext(v->w, v->h, v->frame1->format, v->w, v->h, AV_PIX_FMT_RGBA, SWS_FAST_BILINEAR, NULL, NULL, NULL);
av_image_fill_arrays(v->frame2->data, v->frame2->linesize, av_malloc(av_image_get_buffer_size(AV_PIX_FMT_RGBA, v->w, v->h, 4)), AV_PIX_FMT_RGBA, v->w, v->h, 4);
v->wOld = v->w;
v->hOld = v->h;
}
sws_scale(v->sws, v->frame1->data, v->frame1->linesize, 0, v->h, v->frame2->data, v->frame2->linesize);
v->rgba = v->frame2->data[0];
return 1;
}
struct vobie {
float **sampleBuffer;
uint32_t sampleCount;
vorbis_info vi;
vorbis_comment vc;
ogg_packet op;
vorbis_dsp_state dsp;
vorbis_block vb;
};
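/* Matroska stores the three Vorbis header packets (identification, comments,
 * setup) in CodecPrivate using Xiph lacing: byte 0 is the packet count minus
 * one (must be 2 here), then each of the first two lengths is a run of 0xFF
 * bytes plus one final byte; the last packet's length is whatever remains. */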
struct vobie *alloc_codec_vorbis(uint8_t *private, size_t privateLen) {
uint8_t numPacketsMinusOne = private[0];
if(numPacketsMinusOne != 2) return NULL;
size_t i = 1;
size_t len0 = 0;
while(private[i] == 0xFF) {
len0 += 0xFF;
i++;
}
len0 += private[i++];
size_t len1 = 0;
while(private[i] == 0xFF) {
len1 += 0xFF;
i++;
}
len1 += private[i++];
size_t len2 = privateLen - i - len0 - len1;
struct vobie *v = calloc(1, sizeof(*v));
vorbis_info_init(&v->vi);
vorbis_comment_init(&v->vc);
v->op.packet = private + i;
v->op.bytes = len0;
v->op.b_o_s = 1;
v->op.e_o_s = 0;
v->op.granulepos = 0;
v->op.packetno = 0;
if(vorbis_synthesis_headerin(&v->vi, &v->vc, &v->op)) {
return (void*) (uintptr_t) 1;
}
i += len0;
v->op.packet = private + i;
v->op.bytes = len1;
v->op.b_o_s = 0;
v->op.packetno++;
if(vorbis_synthesis_headerin(&v->vi, &v->vc, &v->op)) {
return (void*) (uintptr_t) 2;
}
i += len1;
v->op.packet = private + i;
v->op.bytes = len2;
v->op.packetno++;
if(vorbis_synthesis_headerin(&v->vi, &v->vc, &v->op)) {
return (void*) (uintptr_t) 3;
}
if(vorbis_synthesis_init(&v->dsp, &v->vi)) {
return (void*) (uintptr_t) 4;
}
if(vorbis_block_init(&v->dsp, &v->vb)) {
return (void*) (uintptr_t) 5;
}
return v;
}
int codec_vorbis_push_packet(struct vobie *v, uint8_t *pkt, size_t len) {
v->op.packet = pkt;
v->op.bytes = len;
v->op.packetno++;
if(vorbis_synthesis(&v->vb, &v->op)) {
return -1;
}
if(vorbis_synthesis_blockin(&v->dsp, &v->vb)) {
return -2;
}
v->sampleCount = vorbis_synthesis_pcmout(&v->dsp, &v->sampleBuffer);
v->op.granulepos += v->sampleCount;
vorbis_synthesis_read(&v->dsp, v->sampleCount);
return v->sampleCount;
}