diff --git a/404.html b/404.html new file mode 100644 index 0000000..f8414f0 --- /dev/null +++ b/404.html @@ -0,0 +1,3 @@ + +404 Not Found +

404 Not Found

diff --git a/CSS/custom.css b/CSS/custom.css new file mode 100644 index 0000000..ef6eac1 --- /dev/null +++ b/CSS/custom.css @@ -0,0 +1,221 @@ +@media (prefers-color-scheme: light) { + :root { + color-scheme: light; + --main: #002ca5; + --light: #1f232b; + --lighter: #0c1217; + --dark: #a3c6f7; + --accent: #d9e1e8; + --visited: #5178e4; + } + } + /* Dark theme */ + @media (prefers-color-scheme: dark) { + :root{ + color-scheme: dark; + --main: #2b90d9; + --light: #9baec8; + --lighter: #d9e1e8; + --dark: #1f232b; + --accent: #5178e4; + } + } + + /* ----------BODY ITEMS---------- */ + body { + background-color: var(--dark); + color: var(--light); + } + + body h1 a, body header h2 a { + color: var(--lighter); + } + + body h1 a:hover, body header h2 a:hover { + color: var(--light); + } + + body#post article h2, body#post article h3, body#post article h4, body#post article h5 { + color: var(--lighter); + } + body footer nav { + color: var(--light); + } + + body #post footer nav a, body#collection footer nav a, body#post footer nav a, body#subpage footer nav a { + margin-top: 0; + color: var(--lighter); + } + + /* font was .86*/ + body #post code, body#collection code, body#post code, body#subpage code { + background-color: var(--light); + border: 1px solid var(--light); + color: var(--dark); + padding: .1em .2em; + font-size: .20em; + -webkit-border-radius: .25em; + -moz-border-radius: .25em; + border-radius: .025em; + + } + + + body#collection a.read-more, body#subpage a.read-more { + color: var(--lighter); + } + + /* ----------CUSTOM---------- */ + + + + body #post .alert, #subpage .alert, body#collection article, body#post article, body#subpage #wrapper h1, body#subpage article, pre { + max-width: 70rem; + margin: 0 auto; + margin-bottom: 0px; + } + + .hljs, body#post article, pre { + font-size: 0.9em; + padding: 0.6em; + } + + /* ----------END CUSTOM------ */ + + + /* ----------END BODY ITEMS---------- */ + + + /* ----------NAVIGATION & LINKS---------- */ + a { + color: var(--main); + } + + a.user.hidden.pin.action, a.user.hidden.delete.action, a.user.hidden.action { + color: var(--light); + } + + a.pinned { + color: var(--lighter); + } + + a.btn.cta, a.btn.submit, a.btn[type="submit"], button.cta, button.submit, button[type="submit"], input.cta, input.submit, input[type="submit"], select.inputform.cta, select.inputform.submit, select.inputform[type="submit"], textarea.inputform.cta, textarea.inputform.submit, textarea.inputform[type="submit"] { + border: 1px solid var(--light); + background: var(--light); + color: var(--dark); + } + + a.btn.cta:hover, a.btn.submit:hover, a.btn[type="submit"]:hover, button.cta:hover, button.submit:hover, button[type="submit"]:hover, input.cta:hover, input.submit:hover, input[type="submit"]:hover, select.inputform.cta:hover, select.inputform.submit:hover, select.inputform[type="submit"]:hover, textarea.inputform.cta:hover, textarea.inputform.submit:hover, textarea.inputform[type="submit"]:hover { + border: 1px solid var(--lighter); + background-color: var(--lighter); + text-decoration: none; + } + + .post-title a:link, .post-title a:visited { + color: var(--lighter); + } + + #post nav a:not(.home), header nav a { + color: var(--light); + } + + nav#manage ul a { + display: block; + color: var(--lighter); + background-color: var(--accent); + padding: 0 .5em; + margin: 0; + } + + nav#manage ul a:hover { + color: var(--light); + } + + nav#manage ul ul { + background: var(--accent); + } + + /* ----------CUSTOM---------- */ + + body#collection nav#manage li a.write 
{ + font-family:Lora,palatino linotype,book antiqua,new york,'dejavu serif',serif + } + #wrapper { + max-width:80em; + } + + nav#post .alert, #subpage .alert, body#collection article, body#post article, body#subpage #wrapper h1, body#subpage article, pre { + max-width: 70rem; + margin: 0 auto; + margin-bottom: 0px; + + } + + /* ----------END CUSTOM------ */ + + + /* ----------END NAVIGATION & LINKS---------- */ + + + + /* ----------HEADERS---------- */ + + #official-writing h2, #official-writing h3, #official-writing h4, #wrapper h2, #wrapper h3, #wrapper h4 { + color: var(--lighter); + } + + header p.description { + color: var(--light); + } + /* ----------END HEADERS---------- */ + + + /* ----------TABLES---------- */ + th { + background: var(--main); + color: var(--dark); + padding-left: 10px; + padding-right: 10px; + } + td { + border: solid 1px var(--accent); + padding-left: 10px; + padding-right: 10px; + } + /* ----------END TABLES---------- */ + + h1 { + font-size: 2rem; + margin-top: 0.5em; + margin-bottom: 0em; + } + + h2 { + font-size: 1.8rem; + margin-top: 0.5em; + margin-bottom: 0em; + } + + h3 { + font-size: 1.5rem; + margin-top: 0.5em; + margin-bottom: 0em; + } + + h4 { + font-size: 1.44rem; + margin-top: 0.5em; + margin-bottom: 0em; + } + + h5 { + font-size: 1.15rem; + margin-top: 0.5em; + margin-bottom: 0em; + } + + h6 { + font-size: 0.96rem; + margin-top: 0.5em; + margin-bottom: 0em; + } \ No newline at end of file diff --git a/CSS/simple.css b/CSS/simple.css new file mode 100644 index 0000000..b89ca00 --- /dev/null +++ b/CSS/simple.css @@ -0,0 +1,677 @@ +/* Global variables. */ +:root, +::backdrop { + /* Set sans-serif & mono fonts */ + --sans-font: -apple-system, BlinkMacSystemFont, "Avenir Next", Avenir, + "Nimbus Sans L", Roboto, "Noto Sans", "Segoe UI", Arial, Helvetica, + "Helvetica Neue", sans-serif; + --mono-font: Consolas, Menlo, Monaco, "Andale Mono", "Ubuntu Mono", monospace; + --standard-border-radius: 5px; + + /* Default (light) theme */ + --bg: #fff; + --accent-bg: #f5f7ff; + --text: #212121; + --text-light: #585858; + --border: #898EA4; + --accent: #0d47a1; + --code: #d81b60; + --preformatted: #444; + --marked: #ffdd33; + --disabled: #efefef; +} + +/* Dark theme */ +@media (prefers-color-scheme: dark) { + :root, + ::backdrop { + color-scheme: dark; + --bg: #212121; + --accent-bg: #2b2b2b; + --text: #dcdcdc; + --text-light: #ababab; + --accent: #ffb300; + --code: #f06292; + --preformatted: #ccc; + --disabled: #111; + } +} + +/* Reset box-sizing */ +*, *::before, *::after { + box-sizing: border-box; +} + +/* Reset default appearance */ +textarea, +select, +input, +progress { + appearance: none; + -webkit-appearance: none; + -moz-appearance: none; +} + +html { + /* Set the font globally */ + font-family: var(--sans-font); + scroll-behavior: smooth; +} + +/* Make the body a nice central block */ +body { + color: var(--text); + background-color: var(--bg); + font-size: 1.15rem; + line-height: 1.5; + margin: auto; + max-width: 70em; + padding: 3em; +} + + +/* Make the header bg full width, but the content inline with body */ +body > header { + background-color: var(--accent-bg); + border-bottom: 1px solid var(--border); + text-align: center; + padding: 0 0.5rem 2rem 0.5rem; +} + +body > header h1 { + max-width: 1200px; + margin: 1rem auto; +} + +body > header p { + max-width: 40rem; + margin: 1rem auto; +} + +/* Add a little padding to ensure spacing is correct between content and header > nav */ +main { + padding-top: 1.5rem; +} + +body > footer { + margin-top: 
4rem; + padding: 2rem 1rem 1.5rem 1rem; + color: var(--text-light); + font-size: 0.9rem; + text-align: center; + border-top: 1px solid var(--border); +} + +/* Format headers */ +h1 { + font-size: 3rem; +} + +h2 { + font-size: 2.6rem; + margin-top: 3rem; +} + +h3 { + font-size: 2rem; + margin-top: 3rem; +} + +h4 { + font-size: 1.44rem; +} + +h5 { + font-size: 1.15rem; +} + +h6 { + font-size: 0.96rem; +} + +/* Prevent long strings from overflowing container */ +p, h1, h2, h3, h4, h5, h6 { + overflow-wrap: break-word; +} + +/* Fix line height when title wraps */ +h1, +h2, +h3 { + line-height: 1.1; +} + +/* Reduce header size on mobile */ +@media only screen and (max-width: 720px) { + h1 { + font-size: 2.5rem; + } + + h2 { + font-size: 2.1rem; + } + + h3 { + font-size: 1.75rem; + } + + h4 { + font-size: 1.25rem; + } +} + +/* Format links & buttons */ +a, +a:visited { + color: var(--accent); +} + +a:hover { + text-decoration: none; +} + +button, +[role="button"], +input[type="submit"], +input[type="reset"], +input[type="button"], +label[type="button"] { + border: none; + border-radius: var(--standard-border-radius); + background-color: var(--accent); + font-size: 1rem; + color: var(--bg); + padding: 0.7rem 0.9rem; + margin: 0.5rem 0; + + /* Ensure buttons use correct font */ + font-family: inherit; +} + +button[disabled], +[role="button"][aria-disabled="true"], +input[type="submit"][disabled], +input[type="reset"][disabled], +input[type="button"][disabled], +input[type="checkbox"][disabled], +input[type="radio"][disabled], +select[disabled] { + cursor: not-allowed; +} + +input:disabled, +textarea:disabled, +select:disabled, +button[disabled] { + cursor: not-allowed; + background-color: var(--disabled); + color: var(--text-light) +} + +input[type="range"] { + padding: 0; +} + +/* Set the cursor to '?' 
on an abbreviation and style the abbreviation to show that there is more information underneath */ +abbr[title] { + cursor: help; + text-decoration-line: underline; + text-decoration-style: dotted; +} + +button:enabled:hover, +[role="button"]:not([aria-disabled="true"]):hover, +input[type="submit"]:enabled:hover, +input[type="reset"]:enabled:hover, +input[type="button"]:enabled:hover, +label[type="button"]:hover { + filter: brightness(1.4); + cursor: pointer; +} + +button:focus-visible:where(:enabled, [role="button"]:not([aria-disabled="true"])), +input:enabled:focus-visible:where( + [type="submit"], + [type="reset"], + [type="button"] +) { + outline: 2px solid var(--accent); + outline-offset: 1px; +} + +/* Format navigation */ +header > nav { + font-size: 1rem; + line-height: 2; + padding: 1rem 0 0 0; +} + +/* Use flexbox to allow items to wrap, as needed */ +header > nav ul, +header > nav ol { + align-content: space-around; + align-items: center; + display: flex; + flex-direction: row; + flex-wrap: wrap; + justify-content: center; + list-style-type: none; + margin: 0; + padding: 0; +} + +/* List items are inline elements, make them behave more like blocks */ +header > nav ul li, +header > nav ol li { + display: inline-block; +} + +header > nav a, +header > nav a:visited { + margin: 0 0.5rem 1rem 0.5rem; + border: 1px solid var(--border); + border-radius: var(--standard-border-radius); + color: var(--text); + display: inline-block; + padding: 0.1rem 1rem; + text-decoration: none; +} + +header > nav a:hover, +header > nav a.current { + border-color: var(--accent); + color: var(--accent); + cursor: pointer; +} + +/* Reduce nav side on mobile */ +@media only screen and (max-width: 720px) { + header > nav a { + border: none; + padding: 0; + text-decoration: underline; + line-height: 1; + } +} + +/* Consolidate box styling */ +aside, details, pre, progress { + background-color: var(--accent-bg); + border: 1px solid var(--border); + border-radius: var(--standard-border-radius); + margin-bottom: 1rem; +} + +aside { + font-size: 1rem; + width: 30%; + padding: 0 15px; + margin-inline-start: 15px; + float: right; +} +*[dir="rtl"] aside { + float: left; +} + +/* Make aside full-width on mobile */ +@media only screen and (max-width: 720px) { + aside { + width: 100%; + float: none; + margin-inline-start: 0; + } +} + +article, fieldset, dialog { + border: 1px solid var(--border); + padding: 1rem; + border-radius: var(--standard-border-radius); + margin-bottom: 1rem; +} + +article h2:first-child, +section h2:first-child { + margin-top: 1rem; +} + +section { + border-top: 1px solid var(--border); + border-bottom: 1px solid var(--border); + padding: 2rem 1rem; + margin: 3rem 0; +} + +/* Don't double separators when chaining sections */ +section + section, +section:first-child { + border-top: 0; + padding-top: 0; +} + +section:last-child { + border-bottom: 0; + padding-bottom: 0; +} + +details { + padding: 0.7rem 1rem; +} + +summary { + cursor: pointer; + font-weight: bold; + padding: 0.7rem 1rem; + margin: -0.7rem -1rem; + word-break: break-all; +} + +details[open] > summary + * { + margin-top: 0; +} + +details[open] > summary { + margin-bottom: 0.5rem; +} + +details[open] > :last-child { + margin-bottom: 0; +} + +/* Format tables */ +table { + border-collapse: collapse; + margin: 1.5rem 0; +} + +td, +th { + border: 1px solid var(--border); + text-align: start; + padding: 0.5rem; +} + +th { + background-color: var(--accent-bg); + font-weight: bold; +} + +tr:nth-child(even) { + /* Set every other cell 
slightly darker. Improves readability. */ + background-color: var(--accent-bg); +} + +table caption { + font-weight: bold; + margin-bottom: 0.5rem; +} + +/* Format forms */ +textarea, +select, +input { + font-size: inherit; + font-family: inherit; + padding: 0.5rem; + margin-bottom: 0.5rem; + color: var(--text); + background-color: var(--bg); + border: 1px solid var(--border); + border-radius: var(--standard-border-radius); + box-shadow: none; + max-width: 100%; + display: inline-block; +} +label { + display: block; +} +textarea:not([cols]) { + width: 100%; +} + +/* Add arrow to drop-down */ +select:not([multiple]) { + background-image: linear-gradient(45deg, transparent 49%, var(--text) 51%), + linear-gradient(135deg, var(--text) 51%, transparent 49%); + background-position: calc(100% - 15px), calc(100% - 10px); + background-size: 5px 5px, 5px 5px; + background-repeat: no-repeat; + padding-inline-end: 25px; +} +*[dir="rtl"] select:not([multiple]) { + background-position: 10px, 15px; +} + +/* checkbox and radio button style */ +input[type="checkbox"], +input[type="radio"] { + vertical-align: middle; + position: relative; + width: min-content; +} + +input[type="checkbox"] + label, +input[type="radio"] + label { + display: inline-block; +} + +input[type="radio"] { + border-radius: 100%; +} + +input[type="checkbox"]:checked, +input[type="radio"]:checked { + background-color: var(--accent); +} + +input[type="checkbox"]:checked::after { + /* Creates a rectangle with colored right and bottom borders which is rotated to look like a check mark */ + content: " "; + width: 0.18em; + height: 0.32em; + border-radius: 0; + position: absolute; + top: 0.05em; + left: 0.17em; + background-color: transparent; + border-right: solid var(--bg) 0.08em; + border-bottom: solid var(--bg) 0.08em; + font-size: 1.8em; + transform: rotate(45deg); +} +input[type="radio"]:checked::after { + /* creates a colored circle for the checked radio button */ + content: " "; + width: 0.25em; + height: 0.25em; + border-radius: 100%; + position: absolute; + top: 0.125em; + background-color: var(--bg); + left: 0.125em; + font-size: 32px; +} + +/* Makes input fields wider on smaller screens */ +@media only screen and (max-width: 720px) { + textarea, + select, + input { + width: 100%; + } +} + +/* Set a height for color input */ +input[type="color"] { + height: 2.5rem; + padding: 0.2rem; +} + +/* do not show border around file selector button */ +input[type="file"] { + border: 0; +} + +/* Misc body elements */ +hr { + border: none; + height: 1px; + background: var(--border); + margin: 1rem auto; +} + +mark { + padding: 2px 5px; + border-radius: var(--standard-border-radius); + background-color: var(--marked); + color: black; +} + +img, +video { + max-width: 100%; + height: auto; + border-radius: var(--standard-border-radius); +} + +figure { + margin: 0; + display: block; + overflow-x: auto; +} + +figcaption { + text-align: center; + font-size: 0.9rem; + color: var(--text-light); + margin-bottom: 1rem; +} + +blockquote { + margin-inline-start: 2rem; + margin-inline-end: 0; + margin-block: 2rem; + padding: 0.4rem 0.8rem; + border-inline-start: 0.35rem solid var(--accent); + color: var(--text-light); + font-style: italic; +} + +cite { + font-size: 0.9rem; + color: var(--text-light); + font-style: normal; +} + +dt { + color: var(--text-light); +} + +/* Use mono font for code elements */ +code, +pre, +pre span, +kbd, +samp { + font-family: var(--mono-font); + color: var(--code); +} + +kbd { + color: var(--preformatted); + border: 1px 
solid var(--preformatted); + border-bottom: 3px solid var(--preformatted); + border-radius: var(--standard-border-radius); + padding: 0.1rem 0.4rem; +} + +pre { + padding: 1rem 1.4rem; + max-width: 100%; + overflow: auto; + color: var(--preformatted); +} + +/* Fix embedded code within pre */ +pre code { + color: var(--preformatted); + background: none; + margin: 0; + padding: 0; +} + +/* Progress bars */ +/* Declarations are repeated because you */ +/* cannot combine vendor-specific selectors */ +progress { + width: 100%; +} + +progress:indeterminate { + background-color: var(--accent-bg); +} + +progress::-webkit-progress-bar { + border-radius: var(--standard-border-radius); + background-color: var(--accent-bg); +} + +progress::-webkit-progress-value { + border-radius: var(--standard-border-radius); + background-color: var(--accent); +} + +progress::-moz-progress-bar { + border-radius: var(--standard-border-radius); + background-color: var(--accent); + transition-property: width; + transition-duration: 0.3s; +} + +progress:indeterminate::-moz-progress-bar { + background-color: var(--accent-bg); +} + +dialog { + max-width: 40rem; + margin: auto; +} + +dialog::backdrop { + background-color: var(--bg); + opacity: 0.8; +} + +@media only screen and (max-width: 720px) { + dialog { + max-width: 100%; + margin: auto 1em; + } +} + +/* Classes for buttons and notices */ +.button, +.button:visited { + display: inline-block; + text-decoration: none; + border: none; + border-radius: 5px; + background: var(--accent); + font-size: 1rem; + color: var(--bg); + padding: 0.7rem 0.9rem; + margin: 0.5rem 0; +} + +.button:hover, +.button:focus { + filter: brightness(1.4); + cursor: pointer; +} + +.notice { + background: var(--accent-bg); + border: 2px solid var(--border); + border-radius: 5px; + padding: 1.5rem; + margin: 2rem 0; +} \ No newline at end of file diff --git a/JS/microlight.js b/JS/microlight.js new file mode 100644 index 0000000..34b5103 --- /dev/null +++ b/JS/microlight.js @@ -0,0 +1,24 @@ +! function(e, t) { + "function" == typeof define && define.amd ? define(["exports"], t) : t("undefined" != typeof exports ? exports : e.microlight = {}) +}(this, function(e) { + var t, n, i, o = window, + r = document, + a = "appendChild", + l = "test", + c = ";text-shadow:", + s = "opacity:.", + d = " 0px 0px ", + u = "3px 0px 5", + f = ")", + p = function(e) { + for (n = r.getElementsByClassName(e || "language*"), t = 0; i = n[t++];) + for (var p, h, g, m, y, x = i.textContent, b = 0, w = x[0], v = 1, k = i.innerHTML = "", C = 0, N = /(\d*\, \d*\, \d*)(, ([.\d]*))?/g.exec(o.getComputedStyle(i).color), E = "px rgba(" + N[1] + ",", S = N[3] || 1; h = p, p = 7 > C && "\\" == p ? 1 : v;) { + if (v = w, w = x[++b], m = k.length > 1, !v || C > 8 && "\n" == v || [/\S/ [l](v), 1, 1, !/[$\w]/ [l](v), ("/" == p || "\n" == p) && m, '"' == p && m, "'" == p && m, x[b - 4] + h + p == "-->", h + p == "*/"][C]) + for (k && (i[a](y = r.createElement("span")).setAttribute("style", ["", c + d + 9 + E + .7 * S + ")," + d + 2 + E + .4 * S + f, s + 6 + c + d + 7 + E + S / 4 + ")," + d + 3 + E + S / 4 + f, s + 7 + c + u + E + S / 5 + "),-" + u + E + S / 5 + f, "font-style:italic;" + s + 5 + c + u + E + S / 4 + "),-" + u + E + S / 4 + f][C ? 3 > C ? 2 : C > 6 ? 4 : C > 3 ? 
3 : +/^(a(bstract|lias|nd|rguments|rray|s(m|sert)?|uto)|b(ase|egin|ool(ean)?|reak|yte)|c(ase|atch|har|hecked|lass|lone|ompl|onst|ontinue)|de(bugger|cimal|clare|f(ault|er)?|init|l(egate|ete)?)|do|double|e(cho|ls?if|lse(if)?|nd|nsure|num|vent|x(cept|ec|p(licit|ort)|te(nds|nsion|rn)))|f(allthrough|alse|inal(ly)?|ixed|loat|or(each)?|riend|rom|unc(tion)?)|global|goto|guard|i(f|mp(lements|licit|ort)|n(it|clude(_once)?|line|out|stanceof|t(erface|ernal)?)?|s)|l(ambda|et|ock|ong)|m(icrolight|odule|utable)|NaN|n(amespace|ative|ext|ew|il|ot|ull)|o(bject|perator|r|ut|verride)|p(ackage|arams|rivate|rotected|rotocol|ublic)|r(aise|e(adonly|do|f|gister|peat|quire(_once)?|scue|strict|try|turn))|s(byte|ealed|elf|hort|igned|izeof|tatic|tring|truct|ubscript|uper|ynchronized|witch)|t(emplate|hen|his|hrows?|ransient|rue|ry|ype(alias|def|id|name|of))|u(n(checked|def(ined)?|ion|less|signed|til)|se|sing)|v(ar|irtual|oid|olatile)|w(char_t|hen|here|hile|ith)|xor|yield)$/ [l](k) : 0]), y[a](r.createTextNode(k))), g = C && 7 > C ? C : g, k = "", C = 11; ![1, /[\/{}[(\-+*=<>:;|\\.,?!&@~]/ [l](v), /[\])]/ [l](v), /[$\w]/ [l](v), "/" == v && 2 > g && "<" != p, '"' == v, "'" == v, v + w + x[b + 1] + x[b + 2] == " + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + +
+

Easy video tips for CLI chads

4 minute read. Published: 2023-11-02
+
+

When it comes to encoding videos, FFmpeg is a ubiquitous tool: it's supported on nearly every modern platform (and many retro ones), includes a plethora of encoders and decoders, and targets most architectures in current use.

+

For decoding and watching videos, MPV is easily king, with a great deal of flexibility and customization; you can do a LOT to tune your viewing experience perfectly.

+

One of the downsides to using FFmpeg and other CLI tools is how many options they can have, and scripting those options can become a complicated mess. This post will cover a few tips and tricks to make it just a little bit easier.

+

Flexible scripting

+

This will focus on Bash scripts; however, the same ideas can easily be implemented on Windows via Python or PowerShell scripting too.

+

Taking a look at the example below, it is small and concise; however, it is hard to read and prone to spelling mistakes that can be hard to spot.

+
ffmpeg -init_hw_device vulkan -i Pacific-rim.webm \
+-vf hwupload,libplacebo=tonemapping=st2094-40:peak_detect=false:contrast_recovery=0.5:colorspace=bt2020nc:color_primaries=bt2020:color_trc=arib-std-b67:range=tv:format=yuv420p10le,hwdownload,format=yuv420p10le \
+ -c:v rawvideo -f nut - | mpv --cache=no -
+
+

Instead of dealing with this mess, we can use an array. This lets us format the arguments neatly, add comments, and, as implied by the previous point, comment out individual arguments. An example of the exact same script is given below; going by character count alone the script is longer, but it is much easier to read and work with.

+
#!/bin/bash
+
+args=(
+###set this block as needed
+tonemapping=st2094-40
+:peak_detect=false
+:gamut_mode=perceptual
+:contrast_recovery=0.5
+###
+#:deband=true
+#:deband_iterations=4
+#:deband_threshold=8
+###set HLG
+:colorspace=bt2020nc
+:color_primaries=bt2020
+:color_trc=arib-std-b67
+###
+:range=tv
+:format=yuv420p10le,hwdownload,format=yuv420p10le
+)
+
+# Join the array into a single filter string, stripping the formatting whitespace
+formatted=$(echo "${args[@]}" | tr -d " ")
+
+ffmpeg -init_hw_device vulkan -i Pacific-rim.webm -vf "hwupload,libplacebo=$formatted" -c:v rawvideo -f nut - | mpv --cache=no -
+
+

Comparing videos with MPV

+

Comparing videos can be one of the worst experiences, ranging from proprietary tools that are lacking in features to open-source tools that are really janky. My preferred way of comparing videos is actually MPV.

+

This is a link to my mpv config. If you scroll to the bottom, I have a couple of profiles created to help compare videos; 2sidesplit and the various diff profiles will most likely be the most useful for visualizing changes. While dedicated tools can be more helpful, MPV is more than enough for simple comparisons.

+

To use MPV in this manner, simply launch mpv file1.mkv --external-file=file2.mkv, then you can use --profile=diff to apply one of the diff profiles. If you instead want to switch between videos, do not specify a profile and instead use set vid 1 and set vid 2 to swap between them; this scales to as many videos as you have. You can set keybinds by adding the below to your input.conf (a launch example follows the keybind block); the key combinations will then be Ctrl + Shift + 1 and so on.

+
Ctrl+! set vid 1
+Ctrl+@ set vid 2
+Ctrl+# set vid 3
+Ctrl+$ set vid 4
+
+
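As a concrete sketch of that workflow (the file names here are placeholders):

# Load both files into one mpv instance; the second becomes an extra video track
mpv file1.mkv --external-file=file2.mkv --profile=diff

# Or skip the profile and toggle tracks from the console instead:
#   set vid 1    (show the first file)
#   set vid 2    (show the second file)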

Comparing MPV settings

+

This is a link to a script I modify and use to collect screenshots. To use this script, I highly recommend setting a profile like the one below in your mpv.conf and launching with it. It's important to set input-ipc-server, as this is what the script I made uses to interact with mpv.

+
[comp]
+#input-ipc-server=\\.\pipe\mpvsocket  ### for windows
+input-ipc-server=/tmp/mpvsocket           ### for linux
+
+##set these as you please
+screenshot-format=jxl
+screenshot-directory=~/Pictures/mpv
+screenshot-jxl-distance=0.0
+screenshot-high-bit-depth=yes
+screenshot-jxl-effort=3
+
+##set this to yes if you want to retain the original colorspace
+screenshot-tag-colorspace=no
+
+
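Launching with that profile is then just (the file name is a placeholder):

mpv --profile=comp video.mkv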

Generating images to share

+

While you really shouldn't use keyframes to showcase your new AV1 encodes, we already know you will anyway, so here is an easy way to do so.

+

This specifically is an example that creates animated AVIF images for each keyframe in your encode. This is great, since you can use an entire GOP for showcasing your encodes:
ffmpeg -i .\nyan-av1.webm -c copy -f segment .\avif-keys\out-%02d.avif

+

This, on the other hand, will generate a PNG of every keyframe you have (the noise bitstream filter drops every non-key frame before the second ffmpeg decodes what remains):
ffmpeg -i ..\nyan-av1.webm -c:v copy -an -bsf:v "noise=drop=not(key)" -f nut - | ffmpeg -vsync 0 -i - out/image-%03d.png

+

This is incomplete, and will be added to in the future

+ +
+ + +
+
+

in Tidbits and tagged Linux, Video and CLI

+ + +
+ + + + +
+
+
+
+
+ + + + diff --git a/blog/hidden-jxl-benefits/index.html b/blog/hidden-jxl-benefits/index.html new file mode 100644 index 0000000..cc10c9f --- /dev/null +++ b/blog/hidden-jxl-benefits/index.html @@ -0,0 +1,345 @@ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + +
+

JPEG XL is kinda cool.

42 minute read. Published: 2023-11-20
+
+
+It is highly recommended to read the notes section after reading this page.
+

While many of the benefits of JXL have been covered many times over, there are some things about JXL that I really think should be more widely known. JXL is so jam-packed with features that some of the real gems have been overlooked. These gems are often features that can be utilized either today, or in a plausible near future.

+

Note: in this article, when I talk about HDR, I am talking explicitly about the intended "brightness" of the image. HDR is a poorly defined term, if it is defined at all. However, its general use seems to refer to the potential discernible brightness differences within an image, and that is how it is used here.

+

Directory

+

These are the main parts of this post. You can get an idea of what is covered using the directory, as well as use it to jump to the appropriate sections.

+
1. XYB, more than just compression. Are weird colors a problem of the past?
2. Hey, you said that HDR is hard? But I heard about this new tech called "gain maps", what about that?
3. I have a need for speed that can't be satiated, but oh boy does JXL get close.
4. Can you tell me how consistent the main JXL encoder is? You mentioned it before.
5. OK, JXL is kinda cool. I would use it but man, C++ sucks.
6. Notes. (Read this)
+

Edit History

+

Major edits and corrections are at the top of the page for transparency. They will not be made to the article itself unless they are minor. Everything you see here takes precedence over both the article itself and the notes section.

+
1. We now have a JXL image gallery for Android. This is not a simple JXL viewer like before, but a fork of Fossify Gallery with JXL support: a proper, full-blown gallery app. This is quite important, since it is now viable to store all of your phone's images in JXL format.
+

XYB, more than just compression. Are weird colors a problem of the past?

+

Colorspaces are complicated things; they are a product of trying to squeeze the absolute best out of the hardware they are designed for, and they have historically had severe limits. But first, a quick primer as to why they are needed.

+

I thought we learned about color in primary school...

+

Images are made of pixels. This is fairly common knowledge; however, did you know that we can actually represent and store pixels in a large variety of formats? "But why, Quack?" It turns out that human eyes are extremely complicated. With that complexity they also become very flexible and very powerful, able to adapt to a large variety of viewing conditions.

+

Content creators wish to make the best-looking pictures and videos they can, so they want to maximize the flexibility and capacity of their images as far as possible. Sadly, the hardware we have today is nowhere near good enough to make the best use of human vision. Obviously, hardware 10, 20, and even more so 30 years ago was worse still.

+

YUV/YCbCr and RGB are pixel formats you may have heard of; they refer to how the pixels themselves are stored. They are formats a display can receive and utilize directly, and they are designed to be effectively negligible in cost to convert back and forth. This was very important before the 2000s, when hardware was much weaker than it is today; doing conversions could have a noticeable performance penalty back then. We can consider these "display-based formats", since they try their best to model data in forms it can be displayed in. However, we can't simply store the raw YUV or RGB data. Not only does this take up a lot of space, displays can't handle the vast majority of the data we could store here anyway, so we assign colorspaces to this data so we can make the best use of it by optimizing it for the display we are using.

+

These colorspaces, while great for size and speed, have severe limitations precisely because they are typically designed around the limitations of the displays they target. This means they really aren't the most flexible. The de facto standard we use for images today was designed for CRT DISPLAYS; sRGB came out in the 90s... For anyone who is familiar with the pace of technology, this is ancient. In fact, it would be considered "retro" by other standards. Yet we still use it today.

+

OF COURSE content creators weren't satisfied with this; why would they be? I doubt the average person has used a CRT in over a decade now. Because of this, there are many more modern colorspaces available to better fit the displays we use. In fact, many displays can now support several of these colorspaces; technology has progressed a lot, after all. However, the standards we use and default to haven't. Because of this we can only display one colorspace at a time, and while we can convert between colorspaces, the methodology for doing so varies widely and leads to a mix of both good and bad results.

+

Because of this incongruity, we are still mostly stuck with sRGB. Since sRGB is the default, people master content for sRGB the most. And because people master content for sRGB the most, manufacturers default to sRGB. A classic catch-22, the chicken-and-egg problem. This is made worse by Windows and Linux having poor support for color management. Apple, at the very least, excels here.

+

So, what is this XYB thing? Why is it different? Well, XYB is an LMS-based format. This probably doesn't mean much to a lot of people, and that's fine. The super TLDR is that instead of modeling how displays emit light, LMS-based formats model how humans perceive light. This means the limitations of LMS are based on how humans perceive light, instead of how displays produce it, so we can consider LMS a "human-based format" instead of a "display-based format". Remember how I said storing raw RGB or YUV is expensive? The exact same is true for LMS. However, XYB is a modification of LMS designed to be a lot more efficient to store, and it works very well.

+

So JXL can store images in this "human-based format", but in the end we still need to convert to a "display-based format". Sure, this incurs a slight performance penalty; thankfully, we have long passed the 1990s and 2000s where this made a significant difference. On modern hardware this is essentially "free".

+

As mentioned before, there are a lot of display-based formats. I've already told you about the ancient and venerable sRGB; this is one such format. However, there are so many more: DCI-P3, Apple P3, BT.2020, etc. Since JXL uses a human-based format, its effective limitations are far smaller than those of display-based formats. And since JXL needs to be converted to a display-based format so we can actually see the image, thanks to those lesser limitations it can be decoded to any of the aforementioned formats!

+

That's right, JXL decoders by necessity have a degree of built-in color management. If you have a wide-gamut monitor and have seen images look wrong on it, obviously wrong, this is typically because the color space of the image is wrong. A good JXL implementation can pretty much make this an issue of the past. This also means that JXL is a somewhat "future-proof" format. Of course, innovation will come along one day and might make XYB obsolete. But when it comes to storing images, content graded explicitly for this format could be of much higher quality than what we can achieve today.

+

HDR never looked right either; is that fixed as well?

+

In short? Sadly, no. Color is complicated. Part of color is this thing called "luminance", or as many people know it, how bright something is.

+

The color management stuff I was talking about does not take tone mapping into consideration. Tone mapping can once again be a very complicated topic, much like seemingly every single tidbit of color. You might be wondering right now: what even is tone mapping? Once again we need a primer; we need to know what a colorspace transfer is and why it's important. The first thing to understand is that humans don't see light linearly. Imagine a light bulb: if we have a dim bulb and we double the power to it so it doubles in emitted light, humans don't perceive that as twice as bright.

+

So let us imagine we have a series of numbers from 0 to 100. Each number represents the intensity at which a pixel, or light source, is shining: 100 is as bright as it can possibly shine, and 0 is as low as it can possibly go (remember, LCD panels don't actually turn off). Humans are much more sensitive to "dimness" than we are to "brightness". This works out great for us: remember how I mentioned earlier that we can't simply store the raw RGB/YUV data? It means we can take some of the data away from the "bright" parts of the image and hand it over to the dark parts.

+

This is what a transfer function does. It shapes that brightness stepping so it better correlates to how humans actually see light, given the limitations we need to deal with. Different transfer functions shape the light in different ways, which lets a transfer function choose how large a range from dimness to brightness it can represent. So, what does tone mapping do? Put simply, tone mapping is what allows us to change between greatly different transfer functions while keeping the intended perceived lightness differences of the image.

+
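To make that "shaping" concrete, here is the standard sRGB encoding function, a well-known formula included purely for reference; L is relative linear light in [0, 1] and V is the value actually stored:

$$
V(L) = \begin{cases} 12.92\,L & L \le 0.0031308 \\ 1.055\,L^{1/2.4} - 0.055 & L > 0.0031308 \end{cases}
$$

The power-law segment bends the curve so that more code values are spent on dark shades than on bright ones, which is exactly the stepping described above.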

For a very simple example of why tone mapping is important, here is an interactive Desmos graph where the sRGB, gamma 2.2, gamma 2.4, and PQ transfer curves are graphed, with an image of it directly below. See the line sticking out like a sore thumb? That's PQ.

+

embed an image of the graph

+

When looking at this graph, the Y axis corresponds to the intensity of light, with 0 being off and 1 being full intensity. The X axis corresponds to the pixel value as it is actually stored in the file. The bottom parts of each segment are the actual lines; everything else is just math. You can enable and disable the lines by clicking the blue circle on the left side of the math box. It's important to note that with sRGB and gamma 2.2, full brightness is somewhere from 80 to 300 nits. It is supposed to be 80 nits, but there is some variation in how it's actually done, and it's not uncommon to see content graded with a variety of intended max nits. PQ, however, has a solid 10000-nit max. The Desmos link has boxes where you can adjust the peak. If you want to see it at an "intended 1:1" scale, set PQ to "10000" and sRGB to "80".

+

This showcases two things. The first is why we cannot display an sRGB image directly on a PQ screen, and vice versa: the lines don't come close to corresponding, which means the luminance of the image will be all sorts of messed up. To remedy this, we undo the PQ transfer and apply the sRGB transfer. However, this leads into the second point.

+

The second is why we need tone mapping. Imagine mapping an sRGB image intended for 80 nits 1:1 onto a PQ monitor, where on average "white" will be from 200-300 nits (203 is recommended, but there is some leeway in actual implementations). Chances are you won't even be able to see a good portion of the sRGB image. The luminance would be all over the place: dimness would be too dark, and brightness would be too bright.

+

Alright, complicated stuff aside, let's have some quick terminology time. sRGB is a color space, and a color space is defined as a collection of three things: a transfer, a gamut, and a white point. JXL can handle gamut and white point mapping perfectly fine, but as we covered, transfer is a lot more complicated to map because the intended luminance values of sRGB and PQ are greatly different. There is actually another issue beyond the luminance range, though: gamma 2.2 and sRGB's transfer use a relative luminance system, while transfers like PQ use absolute luminance. (PS: HLG uses a relative transfer.)

+

Why does this matter? Well, it turns out it complicates things a little. The main issue is that it's not actually consistent what peak luminance an sRGB image is intending. Some images are graded on an 80-nit max display, some on a 200-nit max display, etc. It's actually kind of arbitrary. This means what 1.0 represents can change depending on the image, and that changes how we need to map it.

+

Even pretending we live in a perfect world where the SDR peak is consistent, how do we actually do the tone mapping to make the two systems work together? Let's talk about rendering SDR on HDR for a second. This is commonly known as "inverse tone mapping", and it is actually a bit easier to do, since we are "expanding" the image instead of "compressing" it.

+

SDR images can be roughly mapped 1:203, where 1 represents the peak brightness of the sRGB image and 203 the absolute brightness it will be displayed at. This kind of "dumb" mapping doesn't always look good, sadly, largely because of the issues we talked about before. The better solution would be to figure out the intended peak of the image and do some tone mapping to 203. However, considering we can rarely know what the source image actually intends, doing the naive mapping is better than none. This is also always something client applications could try to expose.

+
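Written out, with a pure gamma 2.2 power curve standing in for the sRGB transfer (an approximation on my part), the naive mapping is:

$$
L_{\text{out}} \approx 203 \cdot V^{2.2}\ \text{nits}, \quad V \in [0, 1]
$$

so encoded peak white (V = 1) lands at 203 nits on the PQ display.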

On the other hand, imagine a scenario where we do no tone mapping at all: the white image you look at on a normal screen would be dumping 10000 nits on a theoretically perfect PQ display. To put into context how bright this is, it is about as bright as a high-quality office fluorescent light bulb. Say you have one of the new HDR iPads: open an HDR white image, max out the brightness, then go into a room and make it completely dark. This can be harsh enough that people have reported headaches from looking at it too long, and sudden exposure can actually induce discomfort, and in some people minor amounts of pain. These devices are about 1000 nits of output. So dumb tone mapping is for sure better than no tone mapping at all.

+

And this is the easy form of tone mapping; HDR to SDR is significantly harder to do. We are now compressing the possible brightness differences, and the maximum brightness of the image also needs to be brought down significantly. Reconciling the two issues isn't too hard when we do it by hand. We, however, aren't doing it by hand; we need a one-size-fits-all glove. This is very much non-trivial. There are ways to do it, but they come with a degree of difficulty and a performance hit. BT.2446a is a great tone mapping method assuming the content is graded a very specific way, which means it's great for some content and terrible for other content. mpv's spline tone mapping mode with peak detection and contrast recovery is an excellent "one size fits all" tone mapping method. It is, however, a rather more complex solution, and in my opinion can actually be worse than BT.2446a for well-mastered content. And of course, there are plenty of other tone mapping functions; ACES Filmic is another popular curve.

+
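For the curious, a minimal mpv.conf sketch of that spline setup, with option names as I understand recent mpv builds expose them; treat it as a starting point, not a definitive config:

tone-mapping=spline
hdr-compute-peak=yes
hdr-contrast-recovery=0.5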

So finally, what does JXL do here? The JXL spec does have some tone mapping data built into it; however, it leaves the actual implementation of tone mapping up to the JXL library. For instance, libjxl can perform some rudimentary tone mapping when decoding the XYB image. It is quite basic, but it does the job when converting SDR to HDR. I don't recommend it for HDR to SDR, however, since it tends to not do a very good job; images will usually look kind of messed up.

+

So yeah, HDR/SDR? Not exactly a solved problem. Nothing can easily overcome the limitations of grading an image for a specific luminance range and displaying it on a different range. But make no mistake: even if HDR doesn't become super easy with this, it is still really nice to have proper gamut support baked in. Gamut issues have plagued users since long before transfer issues have.

+

OK, the HDR stuff still kinda sucks, but everything else is great, right?

+

Kinda. I know I said that weird colors are a thing of the past, but this is only somewhat true.

+

A) The benefits of this only really come when the image is XYB encoded. When doing lossless encoding, cjxl will not convert the image to the XYB colorspace and tag it as such (even if you grade an image in XYB and encode that with cjxl), but will encode the image "as is" instead. While this is good for encoding losslessly, it sadly means we don't currently get the color management benefits. libjxl also doesn't currently allow us to master XYB images. We can, however, still use very-wide-gamut images and work down from there, so the benefit is still immediate.

+

Now, how much does it matter that you need to use lossy encoding? Even if something isn't lossless, that doesn't always mean viewers will be able to tell at all. If an image is technically lossy, but a human consumer isn't expected to notice any degradation, it is commonly called "visually lossless". (Though be warned: lots of companies will take liberties in calling something such.)

+

For example, encoding an image with cjxl -d 0.5 -e 8, I can't tell the difference between the JXL-encoded image and the original at all. I've attempted multiple blind tests on myself and failed each one. Perhaps if I had a better monitor I could tell the difference, but when I am looking this hard, it's not an issue for delivery images at all. It's also worth noting there are significant space savings to be had this way.

+

So we have a trade-off here. We don't get lossless images, but we can support any kind of image gamut pretty nicely. I would say that is a fair trade-off, at the very least.

+

B) Gamut mapping is a little more complicated than just decoding to the gamut. XYB can be converted to XYZ perfectly fine, which works great as the middle man. However, what happens when you input a BT.2020 color into XYB, then decode that to an sRGB image? It will look "mostly right". You see, gamut mapping is also a little involved, though at least much less complicated than tone mapping. And much like tone mapping, gamut mapping seems to be left up to the implementation. At the very least, even a mediocre gamut map is better than no gamut map. libjxl does have proper gamut mapping support, so this is not an issue for this specific case, but other implementations may not support it yet.

+

The other thing to note here: grading a wide-gamut image and displaying it on a lower-gamut display will always look worse. This isn't because of the limits of gamut mapping; it is because you are displaying it on an inferior display. If you are trying to figure out why the image looks worse, this is a likely explanation.

+

Alright, enough advertisement. How do I use it?

+

A good question with an easy answer. The first thing to do is make an image with an XYB encoding. While you can use any image, to show that this is actually working we are going to take an sRGB image and make it into a BT.2020 image. Encode your image with a lossy setting; if you don't want any easily noticeable (if any) quality loss, you can simply do cjxl -e 7 -d 0.5 imagein.png imageout.jxl. (If your source is a JPEG, you will need to disable lossless JPEG with --lossless_jpeg=0, but keep in mind the result may not actually be smaller than the source image.) Both variants are collected below.

+
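Collected as commands (the file names are placeholders):

# Lossy XYB encode from a PNG source; -d 0.5 stays visually lossless for most content
cjxl -e 7 -d 0.5 imagein.png imageout.jxl

# JPEG source: disable lossless JPEG transcoding so the image is re-encoded as XYB
cjxl -e 7 -d 0.5 --lossless_jpeg=0 imagein.jpg imageout.jxl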

The next step is to simply decode the image to a usable format; in this case, let's use PNG. To first understand how djxl decodes image colorspaces, let's take a look at this example command line: djxl --color_space RGB_D65_SRG_Per_SRG in.jxl sRGB.png

+

in.jxl and sRGB.png are simple, but what about the color_space option? It breaks down like so: 1_2_3_4_5.

+
1. Pixel format. This is the format of the pixels themselves.
2. White point. Part of a colorspace is the white point; it defines how "warm" white is.
3. Gamut. As we talked about before, this is what we will need to change to support various gamut formats.
4. Rendering intent. This tells us how we should shrink the gamut of the image.
5. Transfer. As we talked about before, transfer is important because it shapes the light intensity. Ideally we wouldn't muck about here.
+

So what does this command line do? It decodes the image as RGB pixels, D65 white point, sRGB gamut (also known as BT.709), perceptual rendering intent, with an sRGB transfer. Modifying this to support BT.2020 is easy enough, although it's not well documented: we can use djxl --color_space RGB_D65_202_Per_SRG in.jxl 2020.png. This replaces the SRG gamut section with 202, which stands for the BT.2020 gamut. We can now open these images in any viewer that doesn't do automatic gamut mapping to compare them, and yup! One image is in BT.2020 and the other in sRGB. In a viewer that doesn't do gamut mapping, the BT.2020 one will look "wrong". However, when we open it in an app that does do gamut mapping, like mpv, we can see the image still looks proper, while mpv reports BT.2020 as the gamut.

+
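Side by side, the two decodes are:

# Decode to sRGB (BT.709 gamut)
djxl --color_space RGB_D65_SRG_Per_SRG in.jxl sRGB.png

# Decode the same file to BT.2020; only the gamut field changes
djxl --color_space RGB_D65_202_Per_SRG in.jxl 2020.png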

As for something a little cool, we can also export directly to XYB. This could potentially be useful if you want to run a color-managed app: you can directly convert the XYB to whatever format the app you are using expects. djxl --color_space XYB_Per in.jxl XYB.png. Some folks have even been creating XYB JPEG files to squeeze more quality out of them. I can't comment more on this, since it isn't something I have personally tinkered with; however, the article linked does go into some depth.

+

Finally, let's talk a bit about HDR and SDR. As you may have figured out, we can also export HDR or SDR images as we please. As said above, I only recommend going from SDR to HDR. We can export like so: djxl --color_space RGB_D65_202_Per_PeQ --display_nits=1000 in.jxl HDR.png, or we can use djxl --color_space RGB_D65_202_Per_HLG --display_nits=100 in.jxl HDR.png. There is a non-insignificant chance you will encounter some visible brightness changes, but you will generally get close enough that it will be fine. You can tinker with the values to get the image as close as possible.

+
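For reference, those two exports as plain commands:

# PQ output targeting a 1000-nit display
djxl --color_space RGB_D65_202_Per_PeQ --display_nits=1000 in.jxl HDR.png

# HLG output targeting a 100-nit display
djxl --color_space RGB_D65_202_Per_HLG --display_nits=100 in.jxl HDR.png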

Hey, you said that HDR is hard? But I heard about this new tech called "gain maps", what about that?

+

So while I just talked about how luminance tone mapping is actually pretty complicated (particularly when dealing with HDR to SDR), there is something promising to make it much easier for us. Gain maps are an additional image layer that can be used to add extra luminance information to SDR images, effectively allowing us to create both an HDR image and an SDR image at the same time, in the same file, with minimal overhead.

+

These aren't quite as good as native HDR images; they do have limitations. Despite this, users who consume these images still get the "HDR" benefit, and a lot of consumers find it to be quite good. On the other hand, people with a traditional SDR display don't have to worry about the complicated tone mapping mentioned previously; they will still receive the SDR picture as the original artist intended it. Whether that artist is a digital illustrator or a photographer, both SDR and HDR users can enjoy a first-class experience.

+

So after all this, does JXL support gain maps? Well, no. Gain maps aren't part of the JXL spec, but then, they aren't part of JPEG's spec either. Readers may be pleased to hear that JXL allows a large number of layers. This means JXL can "natively support" encoding gain map images, and while there is no spec for this, the potential for the use case is there. It's up to the ecosystem to decide whether this is a route it will go.

+

I have a need for speed that can't be satiated, but oh boy does JXL get close.

+

The decode speed of lossy JXL images is actually really fast. OK, yeah, sure, even a potato can decode a single image. But let's say we have 30 images on screen at once. "Who needs that?" one may ask. Galleries, of course! There are hundreds of galleries on the web, in our phones, on our PCs. JXL is certainly fast enough here. But what if I want something faster? What if, when I say I have a need for speed, I mean real speed?

+

Something I have been testing for a while is using JXL image sequences as a mezzanine format. A very quick TLDR on what a mezzanine format is: it's a video format comprising a sequence of still images, designed to make video editing faster. One needs only three major things from a mezzanine.

+
1. Still images for instant frame-perfect seeking.
2. Fast enough decode speed to make scrubbing multiple videos on a timeline smooth.
3. Consistently high quality.
+

Does JXL satisfy these criteria? For still images, obviously yes; we wouldn't be here right now if this were a no. As for consistently high quality, JXL can do this quite nicely on the higher effort levels, which I will showcase a bit later.

+

So, how fast is lossy JXL on my Ryzen 2600?

+
• 30fps? Are you a beginner driver or an elderly driver? I can't tell.
• 60fps? Maybe you can enter turtle races. (Lossless JXL can be found here. Magically faster than some PNG sequences for me somehow...)
• 90fps? We are barely scratching usable.
• 150fps? At least we are getting somewhere.
• 200+fps. Ah, here you are!
+

Yeah, that's pretty fast. "But Quack! CineForm is much faster than that! And ProRes is even faster!!!" Okay, yeah, that is true. I lost this battle, but they are also about 2x the size for similar quality. If you run out of SSD space for your mezzanines (it has happened to me twice, and I swear it will not happen again; I said this the first time too), you will learn the true meaning of slow. Using JXL lets me fit double the footage and avoid potential bandwidth bottlenecks, which can cause timeline performance to suffer as well.

+

I'm not lying either; this was captured in December 2023: 200+ FPS with mpv under Wayland using Vulkan + mailbox, on a Linux desktop running the Sway compositor.

+

marine-unison image here

+

many marine image here

+

I made this video by encoding the video to PNG, then using the following command line to encode the PNG frames to JXL. I then muxed that JXL sequence into an MKV and played it back using mpv. The command is as follows: cjxl -e 3 -d 1 --faster_decoding=4. A sketch of the full pipeline follows.

+
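A minimal sketch of that pipeline, assuming GNU parallel, a 24 fps source, and ffmpeg/mpv builds with JXL support (all file names here are placeholders):

# 1. Decode the video into a PNG image sequence
ffmpeg -i video.mp4 img-seq/frame-%04d.png

# 2. Encode every frame to JXL, tuned for decode speed
parallel cjxl -e 3 -d 1 --faster_decoding=4 {} {.}.jxl ::: img-seq/*.png

# 3. Mux the JXL sequence into an MKV and play it back
ffmpeg -framerate 24 -i img-seq/frame-%04d.jxl -c copy mezzanine.mkv
mpv mezzanine.mkv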

When encoding without the faster_decoding flag, we get roughly half the speed we do with it.

+

Slower marine-unison image here

+

many marine-unison slow image here

+

So how useful is this for a mezzanine? 200fps is just barely scratching the surface of usable. I wouldn't use it in any complicated workflows; however, if you are only working with 2 or 3 simultaneous video clips at a time, then JXL could very well be useful here. I'm pretty excited to see how far JXL will go in this area. Faster CPUs will mean faster decode times, and I am sure the faster_decoding flag could likely be optimized too. We may also see optimizations in libjxl's decoder, or potentially alternative, faster decoders.

+

"OK quack, That's cool and all, but not everyone cares about a video, I just want to make sure my old craptop will load them fast enough. What about Mr. Low end? No no, Not you old desktop low? I mean low end"

+

Sure, you want low end? Do you remember those old Windows netbooks? I do; in fact, I have one right here!

+

An HP Compaq Mini, with specs as follows:

+
root@archiso ~ # neofetch --backend off
+root@archiso
+------------
+OS: Arch Linux i686
+Host: Compaq Mini 110c-1100 0394110000001C00000300000
+Kernel: 6.1.10-arch1-1.0
+Uptime: 29 mins
+Packages: 398 (pacman)
+Shell: zsh 5.9
+Resolution: 1024x600
+Terminal: /dev/pts/0
+CPU: Intel Atom N270 (2) @ 1.600GHz
+GPU: Intel Mobile 945GM/GMS/GME, 943/940GML Express
+Memory: 215MiB / 977MiB
+ 
+
+

Now, let's see how fast libjxl can be. If you have a JXL-enabled browser, the images will embed for you! If not, the links are at the bottom of this section.

+

The first test is a 4K JXL image encoded from a JPEG using cjxl -d 0 -e 7 --lossless_jpeg=0. It for sure takes a while to decode; this isn't something I would want to look at on a regular basis. Lossless images encoded through cjxl -e 7 really don't have the greatest decode speed even on a modern PC, and we can see it hits us hard here. Thankfully, you probably won't come across too many of these on devices this old anyway.

+
root@archiso ~ # hyperfine --runs 5 'djxl ina4k-nore.jxl --disable_output'
+Benchmark 1: djxl ina4k-nore.jxl --disable_output
+  Time (mean ± σ):     19.803 s ±  0.072 s    [User: 38.257 s, System: 0.654 s]
+  Range (min … max):   19.727 s … 19.882 s    5 runs
+
+hyperfine --runs 5 'djxl ina4k-nore.jxl --disable_output'  192.25s user 3.80s system 195% cpu 1:40.10 total
+
+

<JXL> Ina is a comfy streamer, A shame you can't see this image.

+

This next image is "The Woag". It is an 8K lossless encode using cjxl -e 7 -d 0 with JPEG reconstruction. The rendering of this image might even slow down a desktop browser! However, we can see that when it comes to decoding, keeping the JPEG reconstruction support actually saves us a lot of time, despite this image being twice as large (it is an 8K test image).

+
root@archiso ~ # hyperfine --runs 5 'djxl thewoag.jxl --disable_output'
+Benchmark 1: djxl thewoag.jxl --disable_output
+  Time (mean ± σ):      8.871 s ±  0.017 s    [User: 15.441 s, System: 1.623 s]
+  Range (min … max):    8.841 s …  8.883 s    5 runs
+  
+hyperfine --runs 5 'djxl thewoag.jxl --disable_output'  78.02s user 8.58s system 190% cpu 45.428 total
+
+

<JXL> The woag is pretty big! This might be slowing down JXL enabled browsers though.

+

And finally, we have an image encoded with cjxl -e 7 -d 1 from a PNG source. This is more like the images you will actually come across. It is for sure still too slow to be usable for many images; then again, the laptop itself is a little too slow to even do modern browsing.

+
root@archiso ~ # hyperfine --runs 5 'djxl mona.jxl --disable_output'
+Benchmark 1: djxl mona.jxl --disable_output
+  Time (mean ± σ):      5.944 s ±  0.007 s    [User: 10.453 s, System: 0.925 s]
+  Range (min … max):    5.934 s …  5.954 s    5 runs
+
+hyperfine --runs 5 'djxl mona.jxl --disable_output'  53.07s user 5.03s system 188% cpu 30.790 total
+
+

<JXL> I may have a small addiction to Genshin. The game is great and so is the art you are missing!

+

As you can see, even on a really old 1GB dual core Atom, JXL is surprisingly fast. I wouldn't recommend browsing a JXL-enabled page, but I also wouldn't recommend using one of these machines in the first place. It is also worth noting that this is an Arch Linux live boot, not a full install, which might negatively affect speed a bit too. Considering these devices were hardly usable when they were new, however, I consider this at least a small win.

+

Ina: https://files.catbox.moe/5obp2y.jxl

+

Thewoag: https://files.catbox.moe/fsmdyy.jxl

+

Mona: https://files.catbox.moe/6o1t4r.jxl

+

Can you tell me how consistent the main JXL encoder is? You mentioned it before.

+

I will be using the same video as when I showcased the speed of JXL inside of MPV, but I will go into a bit more depth on how to compare here. I used different settings this time because I wanted to optimize more for quality than filesize; the decode speed, however, is the same.

+

There are two methods we can use for encoding the image sequence; the easy one would be to use FFmpeg for the whole job. While that is a valid method, I want to use a bit more encode effort, and FFmpeg isn't great at parallelizing this. Instead, I will use FFmpeg only to decode the video into a series of PNG images. I am mainly doing this because it's fast enough, and using different formats for source and output makes this just a wee bit easier on me. The command I will run is ffmpeg -i video.mp4 -c:v png img-seq\frame-%04d.png, which decodes the frames into a folder called img-seq.

+

The next step is to encode each frame into JXL. For this, Linux users can use parallel -j NUM cjxl -e 6 -d 0.5 --faster_decoding=4 {} {.}.jxl, where NUM is the number of parallel jobs (see the sketch below for the pipeline end to end). On Windows you can use PowerShell's ForEach-Object -Parallel, or a tool like rust-parallel, a Windows-compatible parallel command execution tool similar to parallel. To use rust-parallel you can run ls -name | rust-parallel.exe -p -j 4 -r '(.*).(png)' cjxl.exe -e 6 -d 0.5 --faster_decoding=4 `{0`} `{1`}.jxl (the backticks escape the {} in PowerShell; they are needed because PowerShell uses braces for expressions).
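Putting both steps together on Linux, the whole frame-extraction and encode pipeline is just a few lines. A sketch, assuming ffmpeg, GNU parallel, and cjxl are installed; the job count of 8 is an arbitrary choice:

mkdir -p img-seq
# decode the video into numbered PNG frames
ffmpeg -i video.mp4 -c:v png img-seq/frame-%04d.png
# encode every frame to JXL in parallel; {.} is the input path minus its extension
parallel -j 8 cjxl -e 6 -d 0.5 --faster_decoding=4 {} {.}.jxl ::: img-seq/*.png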

+

It's easier to move the JXL and PNG images into their own folders, so do that now. I will then decode the JXL images back to PNG, because the tool I will be using is ssimulacra2_rs, and it uses VapourSynth for input. This makes it great for doing per-frame testing; however, most of the common input tools on Windows don't support JXL yet. If you are on Linux you may be able to skip this step, as the tools may support JXL depending on how they are compiled, which may be up to your distro. On Windows I can use ls -name | rust-parallel.exe -p -j 4 -r '(.*).(jxl)' djxl.exe `{0`} `{1`}.png to convert the frames back to PNG.
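The Linux equivalent of that decode step is the mirror image of the encode; again a sketch assuming GNU parallel and the jxl/ folder layout described in the next paragraph:

# decode every JXL frame back to PNG for the comparison tooling
parallel -j 8 djxl {} {.}.png ::: jxl/*.jxl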

+

My main folder now has two subfolders: jxl/frame-%04d.png and png/frame-%04d.png, where the png folder is the source, and the jxl folder is the encoded frames.

+

Next we create two vpy files that we can use as inputs. This allows us to compare the frames as an image sequence instead of individually, which is a bit faster and lets us graph the output. To use ssimulacra2_rs' video function, create one file for the JXL folder and one for the PNG folder. The JXL one is given below (for the PNG one, just change the source path).

+
import vapoursynth as vs
+core = vs.core
+clip = core.lsmas.LWLibavSource(source='jxl/frame-%04d.png')
+clip = core.resize.Bicubic(clip=clip, format=vs.YUV444P10, matrix_s="709")
+clip.set_output(0)
+
+

Finally, execute this command to run the comparison: ssimulacra2_rs.exe video -f NUM_THREADS -g png.vpy jxl.vpy. ssimulacra2_rs is quite slow for comparing videos, since the original ssimulacra2 was designed for single images; however, it is quite an accurate method of comparing images/videos to one another. As we can see in the graph below, JXL manages to exceed 90 ssimu2 the vast majority of the time, which is for sure good enough for a mezzanine format.

+

The output graph for JXL

+

How does this compare with ProRes? Good question. Using ffmpeg -i video.mp4 -c:v prores_ks -profile hq ... these were the scores I got.

+

Prores

+

ProRes had a wee bit better average, but its lows were worse than JXL's. However, since anything close to or past a 90 is usually good enough for a mezzanine, I would consider both good here. But how does file size compare? JXL: 1.26 GiB, ProRes: 4.15 GiB. As we can see, JXL is less than a third of the size! We could probably use some of that savings to bump the quality by using -d 0.25 instead, but for me it simply isn't necessary.

+

It could be nice to trade some of that savings for faster decode; however, at the current time, libjxl's faster_decoding flag doesn't seem to do too much here. Increasing it further doesn't actually change anything: the image is the same size, and the decode speed is the same as well.

+

OK, JXL is kinda cool. I would use it but man, C++ sucks.

+

I hear you loud and clear. If it was any louder I might go deaf. I know the pain for sure; I myself am not the biggest fan either. However, have no fear, Rust is here! Well, for decoding at least. But still! jxl-oxide is a third-party, native Rust decoder, and while it isn't as fast as libjxl, it is still pretty fast. The important part of jxl-oxide, however, is not the dual MIT/BSD license, and it's not that Rust is the best programming language ever. It's this:

+
➜  jxl-oxide git:(main) RUSTFLAGS='-C code-model=small -C debuginfo=none -C opt-level=z -C strip=symbols -C codegen-units=1 -C panic=abort -C target-feature=+crt-static' cargo build --target=x86_64-unknown-linux-musl --release
+--SNIP--
+➜  jxl-oxide git:(main) ldd ./target/x86_64-unknown-linux-musl/release/jxl-dec
+        statically linked
+➜  jxl-oxide git:(main) ls -llh ./target/x86_64-unknown-linux-musl/release/jxl-dec
+-rwxr-xr-x 2 quack quack 3.4M Nov 19 23:11 ./target/x86_64-unknown-linux-musl/release/jxl-dec
+➜  jxl-oxide git:(main) upx --lzma --best ./target/x86_64-unknown-linux-musl/release/jxl-dec
+--SNIP--
+➜  jxl-oxide git:(main) ls -llh ./target/x86_64-unknown-linux-musl/release/jxl-dec
+-rwxr-xr-x 1 quack quack 966K Nov 19 23:11 ./target/x86_64-unknown-linux-musl/release/jxl-dec
+
+

That's right, with a simple cargo build command line we can make a 3.4M binary that decodes to either PNG or npy, with proper static compilation so it will run on almost any Linux system, and when we use UPX to compress it, it's a mere 966K. No need to muck about with dependencies, no need for weird cross-compile shenanigans, and these sizes are without rebuilding std. (Note: I use -C opt-level=z here, but that doesn't save much space at all if you are using UPX.)
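If you want to reproduce this build, one assumption worth spelling out is that the musl target has to be present in your toolchain first; with a rustup-managed toolchain that is a one-liner:

# add the static musl target used by the build command above
rustup target add x86_64-unknown-linux-musl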

+

Thanks to being Rust, this also means you can easily use the jxl-oxide crate in your projects. There is even a usable wasm demo for people who would want to target that. (I didn't include it on this page due to the 16k woag image. Wasm might be fast, but no matter what, wasm has its limitations. However, jxl-oxide does seem a bit more reliable than libjxl for this use case.)

+

jxl-oxide recently (as of December 2023) has had some work done on internal colour management support. It may still have some limitations compared to djxl, but the primary benefits I've mentioned before all still apply. You can export wide gamut images to a smaller gamut or vice versa. This of course can, and likely will, improve over time as well.

+

But how does it perform on that crappy laptop from before? Well, not great. jxl-oxide works fine on newer systems, but when you load it on this crappy laptop...

+
130 root@archiso ~ # hyperfine --runs 2 './jxl-dec mona.jxl'
+Benchmark 1: ./jxl-dec mona.jxl
+  Time (mean ± σ):     168.351 s ±  0.236 s    [User: 280.878 s, System: 3.043 s]
+  Range (min … max):   168.185 s … 168.518 s    2 runs
+
+hyperfine --runs 2 './jxl-dec mona.jxl'  563.19s user 7.00s system 168% cpu 5:37.72 total
+
+

Ouch. The results speak for themselves. However, it's worth noting that this is a 32-bit binary compiled with generic compile settings. This means it was built without even so much as SSE support, which hurts a lot. That RAM limitation also hits really hard; keep in mind that this image is 5221x2847. Make no mistake however, jxl-oxide is still a competent decoder for any somewhat modern machine that has remotely close to a reasonable amount of RAM. Of course, there are always optimizations to be made.

+
hyperfine --runs 10 'djxl mona.jxl --disable_output' './target/release/jxl-dec mona.jxl'
+Benchmark 1: djxl mona.jxl --disable_output
+  Time (mean ± σ):     200.3 ms ±  28.0 ms    [User: 1348.1 ms, System: 364.4 ms]
+  Range (min … max):   175.0 ms … 259.3 ms    10 runs
+
+Benchmark 2: ./target/release/jxl-dec mona.jxl
+  Time (mean ± σ):     561.8 ms ±  23.2 ms    [User: 1954.7 ms, System: 795.2 ms]
+  Range (min … max):   515.5 ms … 592.4 ms    10 runs
+
+Summary
+  djxl mona.jxl --disable_output ran
+    2.80 ± 0.41 times faster than ./target/release/jxl-dec mona.jxl
+
+

This was tested on an Arch WSL2 instance on my Ryzen 2600. While yes, jxl-oxide is slower, it's certainly no slouch either; it is still well within reasonable speeds. Just make sure to either compile it with native optimizations, or simply spring for a 64-bit PC instead.
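For the curious, "native optimizations" here just means letting the compiler tune for your exact CPU; a minimal sketch, assuming a rustup toolchain:

# tune codegen for the host CPU; the resulting binary won't be portable
RUSTFLAGS='-C target-cpu=native' cargo build --release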

+

A great colour space, easy decoder, amazing flexibility, and speed to boot! What am I missing out on here?

+

Well, support. JXL is still a new format. At the time of writing, you won't find it in any Android gallery app that I know of; only a couple of apps actually go out of their way to support it, like the popular manga app Tachiyomi. Since Android doesn't support JXL natively, there is little support here. Hopefully one day we might see official platform support; I wish at least one open source gallery app would pick it up.

+

Specifically for web browsers, you won't find too much support here either, with Chrome removing support, and Firefox being Firefox and ignoring the issue outright. However, thanks to the community, there are many forks of both browsers available that have good support. I currently use Thorium and Waterfox on desktop, and Cromite on Android. WebKit-based browsers like Safari and GNOME Web have also supported JXL for a while, but now, thanks to Apple pushing the ecosystem, it is enabled by default. I guess Apple does what Firefox doesn't.

+

Linux support is great. However, keep in mind that people in general have a misconception about how Linux handles these things. Linux relies on the applications themselves to support nearly everything; there is no generic platform support. There are just... a lot of platforms to choose from, some more generic than others. Thankfully, the independent app ecosystems are generally pretty good: both Qt and GTK support JXL, and dedicated tooling like ImageMagick and FFmpeg supports it too. So you shouldn't have too many issues here.

+

As alluded to above, Apple is doing great here. They are offering platform support for JXL on both iOS and macOS now, so you should have zero issues soon enough, as long as you are up to date on any of the Apple ecosystem devices.

+

As for Windows, there is a plugin to add it to the Windows Imaging Component. It works OK; it's pretty limited but it's... fine. It is of course not official support. Hopefully Windows will soon decide to officially support JXL.

+

Is there anything I can do to help?

+

You can help push adoption by recommending JXL to people who can use it, and by using it yourself. Ask the applications you use often to support it. Websites are already serving JXL too, and some, like this page you are viewing, won't even work 100% properly without JXL support, so you can report those websites as broken.

+

If you run a website, you can start serving JXL today. Many CDNs support JXL for customers, and if you aren't serving a large number of images on a single page, you always have the option to use a WASM decoder. I have run into an issue where, despite JXL decoding fast enough, image rendering will actually be paused while another image is decoding. If anyone knows how to prevent Chrome and Firefox from doing this, send me a message on Mastodon or perhaps even make a PR at this repo. I would love to get that working and enable it here.

+

Notes.

+

Color

+

In this article I took some liberties with the terminology and explanations to keep things simple for people who may not have come across these terms and don't have deep knowledge of the subject matter. This was particularly the case when talking about color.

+

Some liberties I took:

+
    +
  • Human-based formats vs display-based formats. YUV/YCbCr would actually be closer to a human-based format than a display-based format.
  • +
  • I somewhat interchangeably used format and color space where color space should have been used.
  • +
  • HDR isn't a well-defined term, if it is defined at all. I used it to refer to a certain classification of transfers; however, it has historically been used for other forms of "dynamic range" too. The term "HDR" itself should be treated as a buzzword unless it is referred to in the context of some kind of spec.
  • +
  • I'm sure I took a lot more here too
  • +
+

For readers who wish to learn the proper terminology and explanations, and gain better in-depth knowledge of color, I highly recommend reading these sources:

+

Hitchhiker's guide to digital color. The language is crass but the knowledge is first class for an introduction to color.

+

This is a git repo containing a large variety of color and HDR information intended for developers. The knowledge at this time isn't well structured, but it is a great repository of info.

+

Bruce Lindbloom's website has some of the best knowledge when it comes to some of the math behind the core concepts of working with color.

+

Cinematic color talks about color management in the context of media production.

+

A PDF about the history and some technical aspects of the XYZ and xyY colorspaces, which are significant when it comes to modeling how human vision works and converting between formats.

+

Speed

+

It is likely possible to gain more decode speed with libjxl by tinkering with the encode settings. However, these are things I don't expect anyone to muck about with themselves. Even the test I ran will need someone to put some effort into preparing their pipeline, as FFmpeg does not currently, at the time of writing this, decode JXL video muxed into MKV or NUT files without a patch. This may change at some point, however.

+

Rust vs C++

+

OK, no, I don't actually hate C++. However, developers of various projects like Ren'Py have complained before about pulling libjxl into their build chain and it not playing nice. For some projects, jxl-oxide might indeed be significantly easier to pull into a build chain, but as it stands, I don't believe jxl-oxide can currently be built as a library and directly integrated with other languages. I may be mistaken on this, however.

+ +
+ + + + + + + +
+
+
+
+
+ + + + diff --git a/blog/index.html b/blog/index.html new file mode 100644 index 0000000..3f6959d --- /dev/null +++ b/blog/index.html @@ -0,0 +1,338 @@ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + +
+
+

+ Waydroid, What is it, and How does it work? +

+ + + + + + 13 minute read + + + + + Published: 2024-01-17 + +
+ +
+

For those who read my Bliss article, this can be thought of as a sort of companion piece to it. Both Waydroid and Bliss OS are Bliss Labs projects, a sort of family when it comes to Android development, and Waydroid and Bliss share a lot of the same technical aspects, both being generic Android x86 projects. Waydroid is becoming increasingly popular within the Linux community, but it seems to not be well understood. Hopefully this blog will shine some light on some of the topics that make Waydroid unique in the Android ecosystem, without diving too far into technicals.

+ + +
+ +
+ + + +
+
+

+ Why aren't we using JXL? +

+ + + + + + 8 minute read + + + + + Published: 2024-01-14 + +
+ +
+

This is a mini part two in something I didn't think was going to become a series, but I wanted to test this specifically anyways. So here we go.

+ + +
+ +
+ + + +
+
+

+ Jpeg-xl is kinda cool. +

+ + + + + + 42 minute read + + + + + Published: 2023-11-20 + +
+ +
+
It is highly recommended to read the notes section after reading this page.
+

While many of the benefits of JXL have been covered a lot, there are some things about JXL that I really think should be more widely known. JXL is so jam-packed full of features that some of the real gems have been ignored. These gems are often features that can be utilized either today, or in a possible short-term future.

+

Note: in this article, when I talk about HDR, I am talking explicitly about the intended "brightness" of the image. HDR is a poorly, if at all, defined term; however, the general use of it seems to be about the potential discernible brightness differences within an image. This is how it is used here.

+ + +
+ +
+ + + +
+
+

+ Per-site VPN using firefox and wireguard. +

+ + + + + + 11 minute read + + + + + Published: 2023-11-04 + +
+ +
+

NOTE: THIS IS UNSAFE

+

It turns out that sometimes when Firefox starts, the Multi-Account Containers will not load. If this happens and you use a website you don't want going over your normal network, THIS WILL COMPROMISE YOU: the websites will load through your normal network. You have been warned.

+ + +
+ +
+ + + +
+
+

+ Easy video tips for CLI chads +

+ + + + + + 4 minute read + + + + + Published: 2023-11-02 + +
+ +
+

When it comes to encoding videos, FFmpeg is a ubiquitous tool: it's supported on nearly every modern platform (and many retro platforms), includes a plethora of encoders and decoders, and targets most currently used architectures.

+

For decoding and watching videos, MPV is easily king here. With a great deal of flexibility and customization, you can do a LOT to tune your viewing experience perfectly.

+

One of the downsides to using FFmpeg and other CLI tools is how many options they can have, and scripting these options can sometimes be a complicated mess. This post will cover a few tips and tricks to make it just a little bit easier.

+ + +
+ +
+ + + + + + + +
+
+

+ Scrcpy as a good webcam. +

+ + + + + + 5 minute read + + + + + Published: 2023-11-02 + +
+ +
+

Scrcpy has for a long time been the best way to record your phone. On Linux it is pretty simple since scrcpy supports V4L2; however, Windows is a bit of a different beast. This blog post will cover Windows primarily, because anyone running Linux should be able to trivially figure out the parts they need via v4l2loopback.

+ + +
+ +
+ + + +
+
+

+ Why I'm in no rush to sell Linux PCs. +

+ + + + + + 11 minute read + + + + + Published: 2023-11-02 + +
+ +
+

For those who know me, it may come as some surprise that I don't sell, nor recommend selling, Linux PCs to the general consumer market. I, as a large Linux fan, don't recommend Linux for the general audience for a couple of reasons.

+ + +
+ +
+ + + + +
+ +
+
+
+
+
+ + + + diff --git a/blog/page/1/index.html b/blog/page/1/index.html new file mode 100644 index 0000000..0d269d6 --- /dev/null +++ b/blog/page/1/index.html @@ -0,0 +1,6 @@ + + + + +Redirect +

Click here to be redirected.

diff --git a/blog/page/2/index.html b/blog/page/2/index.html new file mode 100644 index 0000000..e2c8858 --- /dev/null +++ b/blog/page/2/index.html @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + +
+
+

+ Emulation, Virtualization? An introduction to Technical Term Tomfoolery. +

+ + + + + + 4 minute read + + + + + Published: 2023-11-02 + +
+ +
+

When it comes to virtualization, there's so much that can be confusing when you start learning since there can be a lot of seemingly contradictory things. This is primarily due to what I call technical term tomfoolery, this post will serve as an introduction to emulation technologies, as well as learning how to navigate technical term tomfoolery.

+

I want to preface this by stating that I am in no way a hypervisor or VMM developer. I have hacked on them in the past, but my knowledge here is only of the fundamentals and usage of VMMs and hypervisor technologies, not developing them.

+

The first thing to know when trying to learn computer-related skills is that the real world and the computer science world operate on two separate dictionaries. I'm not sure when this started, but it's been around for quite a long time. However, instead of a history lesson, this will primarily focus on virtualization.

+ + +
+ +
+ + + + +
+ +
+
+
+
+
+ + + + diff --git a/blog/persitevpn/index.html b/blog/persitevpn/index.html new file mode 100644 index 0000000..3d641e6 --- /dev/null +++ b/blog/persitevpn/index.html @@ -0,0 +1,226 @@ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + +
+

Per-site VPN using firefox and wireguard.

+ + + + + + 11 minute read + + + + + Published: 2023-11-04 + +
+
+

NOTE: THIS IS UNSAFE

+

It turns out that sometimes when Firefox starts, the Multi-Account Containers will not load. If this happens and you use a website you don't want going over your normal network, THIS WILL COMPROMISE YOU: the websites will load through your normal network. You have been warned.

+

Per-site VPN using firefox and wireguard (or other VPNs)

+

The basic idea of this is pretty simple. Firefox has an official addon called Multi-Account Containers. This allows you to create and use pseudo-profiles, which let you isolate cookies and use Mozilla VPN. But "wait", you say, "Mozilla VPN sucks!" I agree! Thankfully, while Mozilla may not allow us to use WireGuard VPNs from within Firefox, let alone the extension, SOCKS5 proxies are supported.

+

Why do we need wireguard at all?

+

Aside from the obvious "my VPN provider doesn't support SOCKS5", SOCKS5 doesn't have inherent encryption. This means a misconfigured setup has a significant chance of de-anonymization occurring, and there are many obvious reasons why this is bad. Due to this, it's often better to pair it with a more reliable and easy to use VPN solution like WireGuard or OpenVPN.

+

What methods are available to us?

+

There are many ways of doing this, but there are two main ones we will focus on today. The first will be using Shadowsocks + a VPN; the other method will be to use a tool called wireproxy. Many other options do exist, but these are what this guide will be focusing on.

+

Wireproxy

+

Wireproxy is the easiest, but comes with a sad disadvantage. Firstly, however: wireproxy handles both the VPN and the SOCKS interface. You can set it up really easily by making a simple config file and launching it with a command line. This makes wireproxy really fast to set up. On Linux you can easily do this via startup configs: create a systemd service (my preferred method) or use any init system's autostart mechanism you prefer. On Windows, you can easily set this up using Windows' Task Scheduler. Simply follow the instructions below for that.

+

The unfortunate detriment that comes with wireproxy is that when inactive for a time, the VPN portion goes into a "sleep" mode, meaning when you open a website you will be greeted with a "Hmm. We’re having trouble finding that site." error from Firefox. Simply wait a few seconds, then refresh the page; it should then be working.

+

Windows setup

+

Setup is quite nice and easy. First, download wireproxy from their GitHub repo (you may need to click "Show all assets" to find the Windows version; you will want wireproxy_windows_amd64.tar.gz). Don't worry if you haven't seen this file extension before: both Windows File Explorer and 7z can extract it perfectly fine (though you will need to extract it twice). Copy the executable to somewhere you will remember (I chose C:\Tools).

+

The next step is to go to your VPN provider and download the appropriate WireGuard config file. I cannot help you here, as this will vary on a per-VPN basis. Place this file somewhere you will remember (for me, that's C:\Tools\VPN).

+

Now that you have downloaded the WireGuard config file, we need to create the wireproxy config. No worries, this will be mostly copy and paste with some minor edits. Create a text file with the extension .conf. These are my file contents:

+

Filename: C:\Tools\VPN\wireproxy-swiss.conf

+
WGConfig = C:\Tools\VPN\Swiss-vpn.conf
+
+[Socks5]
+BindAddress = 127.0.0.1:25344
+
+

Nice and simple, right? Simply point WGConfig to the config you downloaded from your VPN provider. Then, for the [Socks5] BindAddress part: 127.0.0.1 is the IP address of your local PC, often also called localhost. :25344 is the important part; this is what we will need to change for additional configs. This is called a port. If you think of an IP address as an apartment building, the port is the room number inside the building.

+

Each network service uses ports. Without getting too technical, every VPN instance we run will be its own network service, and as such each one needs a unique port number. For each subsequent VPN we run, we should add 1 to that number. This will keep things organized and simple to remember, as the example below shows.
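As an example of that +1 rule, a hypothetical second VPN's config would be identical apart from the WGConfig path and the incremented port (the German server here is purely illustrative):

WGConfig = C:\Tools\VPN\Germany-vpn.conf

[Socks5]
BindAddress = 127.0.0.1:25345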

+

Now that we have the config file, we are on the last part of the non-Firefox stuff. In your Start Menu, search for Task Scheduler and launch the app that comes up. At the top of the rightmost bar you should see Create Basic Task; run it. Give it a good name and description! I'll do the below.

+
    +
  • Name: Swiss socks5 VPN
  • +
  • Description : Swiss VPN using Socks5 for firefox
  • +
+

Click Next >, Tick "When I log on", Click Next >, "Start a program", Click Next >. and Finally we add the command to start the proxy and fill it as follows:

+
    +
  • Program/script: "C:\Tools\wireproxy.exe"
  • +
  • Add Arguments: "-d -c C:\Tools\VPN\wireproxy-swiss.conf"
  • +
+

Click Next >, Tick "Open the properties dialog for this task", Click Finish. You now want to set "Run whether user is logged on or not". This is necessary because windows is a really stupid OS, and if it's not set, you will get a black window opening up running the program! not something we want at all. Finally click OK. After this you can either reboot your PC, or right click the task we made, and click Run You can now go to the firefox section at the bottom of the page.

+

Linux

+

The guide for this is mostly 1:1 with Windows, with the exception of the scheduler and file paths. Making a systemd service is easy enough: simply create a file with a .service extension, naming it whatever you please, place it at $HOME/.config/systemd/user/SERVICENAME.service, and enter the appropriate contents. It may look something like this:

+
[Unit]
+Description=Descrptive description
+After=network.target
+
+[Service]
+Type=simple
+ExecStartPre=/bin/sleep 2
+ExecStart=wireproxy -d -c PATH/TO/Wireguard.conf
+
+[Install]
+WantedBy=default.target
+
+

Other init systems or autostart methods will not be covered here, as systemd covers the vast majority of installs now; anyone not using systemd, I will assume, has the knowledge to set up an autostarting application themselves anyways.
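For completeness, after writing the unit file you still need to tell systemd about it and enable it. A minimal sketch, using the SERVICENAME placeholder from above:

# reload user units, then enable and start the proxy service
systemctl --user daemon-reload
systemctl --user enable --now SERVICENAME.service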

+

Shadowsocks

+

Shadowsocks can be a really complicated program. Thankfully for us, however, Shadowsocks provides sslocal, a CLI client that makes configuration really easy. The major detriment to Shadowsocks is that it doesn't handle connecting to a VPN for us.

+

Setting up a VPN to not route any traffic by default, unless an application explicitly wants it, can be pretty hard sometimes. Sadly, the official WireGuard client on Windows doesn't support the split routing which could be used for this, and on Linux... well, it's Linux, well into DIY land. However, I do have instructions on how to set this up for those savvy enough.

+

Windows VPN setup

+

An example dummy Windows conf is given below showing how to set up a WireGuard VPN that does not perform routing of data by default. Note the PostUp and PreDown scripts. A brief description is as follows.

+

Table = off: In networking, we call the connections from one place to another routes, and the table is a list of these potential routes and their priorities. Unfortunately for us, the official WireGuard client isn't all THAT smart, as it doesn't allow us to set the metric, which is the priority an interface has. But thankfully some PowerShell scripting can help us out. So we tell WireGuard "don't add the new routes to the routing table". This means nothing would be able to actually use this connection, which is where the next step comes in.

+

PostUp allows us to run commands on the host after the interface goes live. Since we turned the routing table off, we need to manually add the route ourselves.

+

PreDown: Likewise, since we added the route but WireGuard's table handling is disabled, we also need to manually remove the route. Leaving it around could lead to unwanted behaviors and bugs, so it's best to remove it outright.

+

NEVER RUN UNTRUSTED SCRIPTS

+

If you cannot read what this is doing, do not run it. I will not explain what this does. Running any sort of script from the internet is dangerous. USE wireproxy! For all you know, this could install a keylogger (and YES, it could be this small of a script!) or delete all your pictures. I was conflicted on whether or not to add this; however, I think it will be much more beneficial if I do. (Though wireproxy could be malware too. Welcome to the internet and modern computing; at least you can isolate it if you know how.)

+
[Interface]
+PrivateKey = thisisgarbageinputthatrepresentsaprivatekey
+Address = xxx.xxx.xxx.xxx/32
+DNS = xxx.xxx.xxx.xxx
+PostUp = powershell -command "$wgInterface = Get-NetAdapter -Name %WIREGUARD_TUNNEL_NAME%; route add 0.0.0.0 mask 0.0.0.0 0.0.0.0 IF $wgInterface.ifIndex metric 9999; Set-NetIPInterface -InterfaceIndex $wgInterface.ifIndex -InterfaceMetric 9999;"
+PreDown = powershell -command "$wgInterface = Get-NetAdapter -Name %WIREGUARD_TUNNEL_NAME%; route delete 0.0.0.0 mask 0.0.0.0 0.0.0.0 if $wgInterface.ifIndex metric 9999; Set-NetIPInterface -InterfaceIndex $wgInterface.ifIndex -InterfaceMetric 9999;"
+Table = off
+
+

Linux VPN setup

+

For Linux, things are both easier and more complicated. Linux has many, MANY ways to handle VPNs. I will be using NetworkManager for this since it will apply to most distros out there; if this doesn't apply to you, you likely already have the knowledge to implement this yourself. In theory, it should be really simple, since nearly all networking tools allow you to set the interface priority/metric. However, it turns out NetworkManager is kinda stupid, and has a completely separate config for DNS priority, and when you set up a VPN, it gets priority regardless of interface priority!

+

WHY

+

Download your WireGuard config. No need for modifications to this one. Then run these commands.

+
nmcli connection import type wireguard file wireguard-swiss.conf
+nmcli con modify wireguard-swiss  ipv4.dns-priority 9999
+nmcli con modify wireguard-swiss ipv4.route-metric 9999
+nmcli con modify wireguard-swiss ipv4.never-default yes
+nmcli con modify wireguard-swiss  ipv6.dns-priority 9999
+nmcli con modify wireguard-swiss ipv6.route-metric 9999
+nmcli con modify wireguard-swiss ipv6.never-default yes
+nmcli con modify wireguard-swiss connection.autoconnect yes
+
+

We run these commands because we want only connections that explicitly ask for the VPN to use it. After running them, the new interface should come up every time you log in, and its priority should be the lowest possible each time.

+
Shadowsocks setup
+

Just like setting up wireproxy, this uses mostly the same guide. On Linux, create the systemd file and enable it. The command to execute is sslocal -b 127.0.0.1:25344 --outbound-bind-interface VPN-NAME-HERE. Use 127.0.0.1 to bind to localhost on port 25344, then use --outbound-bind-interface VPN-NAME-HERE. It's important to note that the VPN name is case sensitive.

+
[Unit]
+Description=ShadowSocks5 Proxy
+After=network.target
+
+[Service]
+Type=simple
+ExecStartPre=/bin/sleep 2
+ExecStart=sslocal -b 127.0.0.1:25344 --outbound-bind-interface wireguard-swiss
+
+[Install]
+WantedBy=default.target
+
+

One thing that is good to take note of is the ExecStartPre command. This simply runs sleep for 2 seconds to make sure everything else gets up and running first. This may not be necessary, but IMO it's nice to have anyways for a bit of extra reliability. Not to mention, I doubt you could launch a Firefox session, open a web page, and have it error out before this manages to start up. If you do manage that, you can always lower the sleep delay.

+

Firefox

+

Finally, for the last steps, set up the Multi-Account Containers addon for Firefox. First, add the addon to your browser using Firefox's addon page. Then open the addon window, click Manage Containers, and then New Container. Give it a good name, colour, and icon.

+

Then go again: click Manage Containers, click the container you just created, and then click Advanced Proxy Setup. Add the SOCKS5 IP and port we chose; in my case it is 127.0.0.1:25344. However, we need to prefix it with socks:// to tell Firefox it is a SOCKS proxy, so I would enter socks://127.0.0.1:25344. Finally click apply, and you are done!

+

You can now use this profile to browse webpages over that VPN without interrupting any other traffic on your PC. Simply repeat the instructions above with a different port and VPN server to create new profiles that use different VPNs.

+ +
+ + +
+
+

+ + + + in Tutorial + + + and + tagged + + Linux + + + + , + + + + + Windows + + + + and + + + + + VPN + + + + + +

+ + +
+ + + + +
+
+
+
+
+ + + + diff --git a/blog/repairmanstips/index.html b/blog/repairmanstips/index.html new file mode 100644 index 0000000..9f4583d --- /dev/null +++ b/blog/repairmanstips/index.html @@ -0,0 +1,188 @@ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + +
+

Computer repairman's cheap tricks and tips for a better time.

+ + + + + + 6 minute read + + + + + Published: 2023-11-02 + +
+
+

Repairing computers can be a long and tedious process; these tips and tricks can make doing it just a little easier on the provider.

+

Qemu

+

Probably the single most valuable tool in my toolkit is QEMU. QEMU allows you to emulate a PC, and this has so many benefits. Primarily, I can repair 10-20 PCs all at the same time using a single PC set up right: a simple HBA, plenty of RAM, and a decent CPU is all you need. This also has the benefit of being significantly faster than many old systems that can be RAM or CPU limited. It also allows you to clone and clean drives in preparation for an upgrade, as I will talk about more in the next segment. For users looking to try it themselves, here is a simple command line you can use, assuming you have a Linux server to work with.

+
sudo -E qemu-system-x86_64 -accel kvm \
+    -m 4096 -smp 4 -cpu host -bios /usr/share/OVMF/x64/OVMF.fd \
+    -display gtk -device qxl -drive file=/dev/sdd,if=none,id=drive-disk0,format=raw \
+    -device ahci -device ide-hd,drive=drive-disk0,id=virtio-disk0 \
+    -net nic,model=e1000 -net user -boot menu=on -usb -device usb-tablet
+
+

Breaking this down, let's see what we have:

+
    +
  • +

    sudo -E : This allows us to run the program as root. The -E part "preserves environment", which is needed on Wayland so we can actually have the display connect to the machine.

    +
  • +
  • +

    qemu-system-x86_64 : This is the application name

    +
  • +
  • +

    -accel kvm -m 4096 -smp 4 -cpu host -bios /usr/share/OVMF/x64/OVMF.fd : These set up the machine: basically, use KVM acceleration, 4GiB of RAM, 4 cores, UEFI firmware, and pretend the emulated CPU is the host's, which can have small performance benefits.

    +
  • +
  • +

    -display gtk -device qxl : This sets up the display. It will emulate a graphics card called qxl (a very simple thing, since we only need basic graphics support) and create a window to show the VM in using a toolkit called GTK. Other backends exist, but this isn't important to us; as long as the window opens up, we are good.

    +
  • +
  • +

    -drive file=/dev/sdd,if=none,id=drive-disk0,format=raw -device ahci -device ide-hd,drive=drive-disk0,id=virtio-disk0 : These are what allow us to boot from the disk drive; /dev/sdd is the drive we wish to boot from.

    +
  • +
  • +

    -net nic,model=e1000 -net user : This allows the VM to connect to the internet. It's not always a good assumption that you should connect; whether or not to add this is up to your discretion.

    +
  • +
  • +

    -boot menu=on : This gives us a boot menu. This can be helpful, as QEMU doesn't always like to boot from the drive we pass through.

    +
  • +
  • +

    -usb -device usb-tablet : This defines how the mouse behaves. I don't like the VM grabbing the mouse, and when you add this the VM behaves more like a normal application.

    +
  • +
+

Windows qemu

+

While you can use QEMU on Windows (simply use -accel whpx,kernel-irqchip=off, find the drive name and path using wmic diskdrive list brief, and enter it like so: file=\\.\PHYSICALDRIVE4,if=none, instead), QEMU on Windows when doing this can be quite fragile (though seemingly faster when it DOES work), as WHPX can often crash in a variety of ways and HAXM is discontinued.
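As a sketch, here is the full Windows command with those substitutions applied; the drive number, memory, and core count are assumptions, and it should be run from an elevated prompt:

qemu-system-x86_64.exe -accel whpx,kernel-irqchip=off ^
    -m 4096 -smp 4 ^
    -drive file=\\.\PHYSICALDRIVE4,if=none,id=drive-disk0,format=raw ^
    -device ahci -device ide-hd,drive=drive-disk0,id=virtio-disk0 ^
    -boot menu=on -usb -device usb-tablet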

+

It could be possible that using usb-uas (USB SCSI) instead (guide here) could lead to better performance and reliability when running under Windows.

+

So instead, you can also use Hyper-V on Windows. It can be a bit slower than QEMU, but at least it reliably works, it's still fast, and most importantly still free. I won't give the entire rundown, as this is not a Hyper-V guide; however, the key checklist, in order, is as follows:

+
    +
  • the disk is offline
  • +
  • create a new vm - attach disk later
  • +
  • checkpoints are disabled
  • +
  • add new scsi - hdd - physical disk
  • +
  • set boot from HDD to a higher priority in the Firmware tab
  • +
  • Disable checkpoints
  • +
+

If at any point something goes wrong (like a checkpoint error being reported, or the drive not showing up), close the entire Hyper-V window and try again. You may have done something out of order, and Hyper-V is terrible at updating itself live.

+

Local server

+

Every repair technician should have a 10+TB array for short term data storage. Not only does this allow you to save your tools, any media related stuff, and other business related information, it can be used to help repair computers much faster and much more safely.

+

Being able to clone a customer's drive is great for many reasons. The first one is obvious: it gives you a safety net in case you accidentally lose data, or the drive dies in your care. Having a backup of that data will never be a bad idea.

+

Secondly, you have the QEMU use I talked about above. If you are preparing to migrate your customer to another drive, you can clone the old drive to your storage array and boot from that instead. This can be exponentially faster than an HDD, and I'm sure many of you know that working from an HDD can actually be an outright costly endeavor, as it takes up valuable time that could be spent elsewhere. This also allows you to work around potential drive faults. If the customer's hard drive is failing and you need to clone it to a new device, cloning directly to your storage array can help recover any data that fails; in the case of a premature failure causing the clone to fail, there is a higher chance of being able to rescue what data was there in the first place.
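For the failing-drive case specifically, GNU ddrescue is the usual tool for cloning straight to the array; a minimal sketch, where the device name and array paths are assumptions:

# first pass grabs the easy data; the map file lets later passes retry bad sectors
ddrescue /dev/sdX /mnt/array/customer-drive.img /mnt/array/customer-drive.map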

+

There are other benefits too. One of the services you could provide, given a sufficient storage server size, would be short term data retention and backup services. This is something I personally do for free with customers who wish to upgrade computers or hard drives, as well as offer as a paid service by itself.

+

Boot tools.

+

Boot tools are an invaluable part of any repair technician's toolkit. These can boot a PC that is otherwise completely unusable due to a corrupt or bad boot device, a virus preventing boot, and more.

+
    +
  • Hirens
  • +
  • aio srt
  • +
  • winpe tools
  • +
+

This list is incomplete; more will be added in the future.

+ +
+ + + + + + + +
+
+
+
+
+ + + + diff --git a/blog/scrcpycamera/index.html b/blog/scrcpycamera/index.html new file mode 100644 index 0000000..b0f9fdf --- /dev/null +++ b/blog/scrcpycamera/index.html @@ -0,0 +1,167 @@ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + +
+

Scrcpy as a good webcam.

+ + + + + + 5 minute read + + + + + Published: 2023-11-02 + +
+
+

Scrcpy has for a long time been the best way to record your phone. On Linux it is pretty simple since scrcpy supports V4L2; however, Windows is a bit of a different beast. This blog post will cover Windows primarily, because anyone running Linux should be able to trivially figure out the parts they need via v4l2loopback.

+

Phone setup

+

Scrcpy now supports recording the camera itself. However, due to this being an Android 12+ only feature, it will not be the focus here; though keep in mind the setup will be the same, just without the need for OpenCam or Filmic.

+

There are a couple of ways of setting up the phone for this; however, when it comes to quality, two methods are by far the best: OpenCam and Filmic. This post will focus on OpenCam. Its quality is still great, even if not quite as good as Filmic's, and OpenCam is free, which makes it the most accessible.

+

The first thing to do is install OpenCam. I typically use the F-Droid build, but downloading the APK or using the Google Play Store should work fine too. In OpenCam's settings, click on Onscreen GUI... and set Immersive mode to Hide everything. This will be very important, as we will be doing screen recording.

+

Now, when you back out to the main camera screen and wait a bit, the UI should disappear, showing only the camera feed itself. As far as the Android stuff goes, we are already done: plug your phone into USB, prop it up on a mount pointed at whatever you please, and you are good to go.

+

Scrcpy recording

+

Scrcpy does allow us to record to a file directly using:

+
scrcpy -N --stay-awake --turn-screen-off --record-format=mkv --record=filelocation
+
+

However, because it is using FFmpeg under the hood, it also allows us to use other forms of arbitrary recording. So, a brief rundown of common protocols we could use to achieve this.

+

It's important to keep in mind that scrcpy only supports MP4 and MKV containers, so ideally we would work with just one of those.

+
    +
  • named pipes: sadly these won't work, as Windows named pipe support is not great at best, and at worst completely unusable
  • +
  • sockets: also sadly not an option, as Windows doesn't really support the type of sockets we could easily use
  • +
  • file descriptors: not really an option here, but for more custom solutions it could be
  • +
  • http streaming: this could be a potential solution; however, without -listen 1 we won't have an HTTP server to work with, so it isn't very viable
  • +
  • RTMP: could work with some minimal work on scrcpy's side; it would need us to add FLV streaming support
  • +
  • rtp/rtsp: a possible solution; however, muxing other formats into RTP can sometimes cause quality degradation, so it's not highly recommended
  • +
+

So this leaves us with three real contenders:

+
    +
  • udp: this can work, but has high latency; it needs more investigation
  • +
  • Rist: A possible solution
  • +
  • srt: A possible solution
  • +
+

SRT is the most promising of these solutions: it is low latency, as the video shows. Sadly, the video I recorded suffers from compression artifacts, so I cannot showcase the quality of the stream too; however, it is just about excellent. Not quite as good as a direct feed, but on Windows we will take what we can get.

+
INSERT VIDEO HERE (sorry haven't gotten around to recording it)
+

Some other benefits that SRT offers are fairly easy setup and expanded codec support. While it won't be beneficial here, keep in mind that SRT + MKV will support all the formats MKV supports, including formats like AV1 and FLAC.

+

The scrcpy command to record the video and stream it over SRT is below. There are a couple of optimizations we can do, such as setting the bitrate from the phone to the PC. It's important to set the bitrate to something you can reliably sustain: go too low and you will get artifacts, go too high and you won't be able to sustain the connection.

+
scrcpy -b100M --stay-awake --turn-screen-off --record-format=mkv --record="srt://127.0.0.1:9990?mode=caller&transtype=live&latency=0&recv_buffer_size=0"
+
+

After that, inside of OBS, simply add a new media source, uncheck file, and add srt://127.0.0.1:9990?mode=listener&transtype=live&latency=0&recv_buffer_size=0 to it.

+

Conclusion and extra tidbits

+

With this you should have a fairly low latency, but still high quality, feed to OBS; better than droidcam. It is possible this could get even better in the future.

+
    +
  • Windows does support named pipes, however they are quite the hassle to deal with. However an avid scripter or programmer could probably get this to work.
  • +
  • Optimizations inside of scrcpy for quality and latency could be done
  • +
  • Supporting something like dshow could be a viable solution for scrcpy, as it does support v4l2loopback on linux.
  • +
  • OBS could support scrcpy as a plugin
  • +
+ +
+ + +
+
+

+ + + + in Tutorial + + + and + tagged + + Linux + + + + , + + + + + Windows + + + + and + + + + + Video + + + + + +

+ + +
+ + + + +
+
+
+
+
+ + + + diff --git a/blog/sellinglinux/index.html b/blog/sellinglinux/index.html new file mode 100644 index 0000000..73a6c56 --- /dev/null +++ b/blog/sellinglinux/index.html @@ -0,0 +1,145 @@ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + +
+

Why I'm in no rush to sell Linux PCs.

+ + + + + + 11 minute read + + + + + Published: 2023-11-02 + +
+
+

For those who know me, it may come as some surprise that I don't sell, nor recommend selling, Linux PCs to the general consumer market. I, as a large Linux fan, don't recommend Linux for the general audience for a couple of reasons.

+

Applications and the no average pilot problem.

+

One of the things I hear the most when talking about the suitability of Linux for the average user is that "it does 90% of what a user needs". I think this is without a doubt true. Linux has good office programs, web browsers, and even a plethora of games now. There really is no shortage when it comes to the app library for the Linux desktop.

+

This is where the music stops. Yes, Linux covers 90% of what the average user needs. However, I highly recommend reading this article; the same issues apply here that did then. Many users have individual needs that fall outside of the 90% of applications that are touted.

+

From my personal life alone, I have three anecdotes of such issues. My mother is an avid crocheter. The application she is looking to use has no support at all for Linux. Wine may be a viable option here; however, managing and maintaining Wine is simply out of the question for the average user. Handling dependencies alone is enough of an issue that I don't see it as viable. The argument "just use another program" doesn't really apply here; I have yet to find a single application good enough for her needs that supports Linux.

+

My father is another story too. He uses a CNC machine that needs a Windows application to run. We did get a little lucky here, as an open source project that adds support to Inkscape does exist. However, the difficulty of using Inkscape in comparison is quite high. While my father is rather technologically literate, and could probably figure it out, the time investment into doing so would be far too high.

+

And of course there are other applications and hardware too. There are still a lot of Elgato products that are missing support. Yes, the solution of buying a different product does exist, but it's not always a great deal. As I have already said, I don't see Wine as a valid solution to this problem yet. Until dependency management is handled almost as well as it is on Windows, I don't see it happening any time soon.

+

Accessibility, Flexibility, and Fragmentation.

+

Linux has never been particularly great in the fragmentation realm. The two largest DEs have often had their own applications uniquely tailored to and supported by their own DEs. However, outside of that, fragmentation wasn't really THAT bad. This was the case until Wayland came along.

+

Wayland solves a lot of problems, but it creates them too. Mainly fragmentation, and fragmentation is bad for flexibility and accessibility. For good accessibility support, one needs to be able to pick and choose the components they need: screen readers, OSKs, overlays, attention management software, etc.

+

A11y needs a LOT to have a good time. Many OSKs are now DE specific. Overlay software is too, with some overlay stuff not working on GNOME due to relying on things like wlr_layers. And then you have applications like Activity Watch, which some users rely on to help manage time. This issue was filed in 2019, and it's still an issue today. The fragmentation that has come from Wayland is a massive net loss in terms of usability for many people.

+

Sometimes it feels like the problem with fragmentation isn't being taken seriously. And this is even worse from a flexibility perspective. I use background managers, since I like having a really customizable background. The tool I currently use for this on Sway is mpvpaper, which lets me use MPV for the background, which is really nice for me. It only works on wlroots and Smithay compositors at the moment, as it relies on the wlr_layers protocol, and KWin's background service seems to override it whenever focus is lost.

+

Since GNOME does not support the protocol, no luck there. There is a proposal in the wayland-protocols GitLab repo for ext-layer-shell, which mostly implements it, but comes with the caveat that it is a privileged protocol, meaning applications may not be able to rely on being able to use it.

+

We are relying on Wayland and XDG to implement the features we need to interact with other apps and the DE. However, these protocols seem to put more stock into security than flexibility, with this quote taken from the issue above:

+
+

I'm still of the opinion that we don't want sandboxed apps to get into managing foreign toplevels. That is fundamentally a privileged operation

+
+

I am of the opinion that one of the benefits of a sandbox is being able to pick and choose the permissions we want to expose to it. So what do I do? We used to be able to say, "Well, you should go and implement it yourself!" This no longer holds water, as we have to either get XDG to agree, or the Wayland protocol maintainers to agree, to accept such a protocol; and if they don't want it, you are kind of SOL. But even if they accept it, as you can see, there is no guarantee of support.

+

Needs more than a spit shine.

+

Linux usage in general has a significant lack of polish. If you open a YouTube video in one Firefox tab, and then another video in a separate tab, Firefox will show twice in the audio panel. This is a minor thing, but there are a LOT of minor things; this just happens to be one that's very common now. Each DE is chock-full of small issues. KWin is a little buggy, the settings menu can be complicated to navigate, and it has plenty of small issues like design inconsistencies and some really bizarre names sometimes.

+

GNOME manages to somehow chug on lower end hardware while doing not much else than opening Firefox, needs 3 separate applications for settings, doesn't support server side decorations, and more. Xfce has issues, Cinnamon has issues, so on and so forth. You can never escape these small issues, which can really have a tendency to grate on you after a while.

+

Linux has a lot of these issues. While in isolation they are fine, when you combine them with the other issues it really does become a bad experience. Thankfully, I do actually have a lot of hope for System76's Cosmic DE when it comes to polish. I have been testing what they have already, and I have high hopes for it.

+

Support.

+

When I sell a computer, I commit to supporting said machine. With Windows or OSX, there is a degree of reliable support for the OS and applications. With Linux, this puts a lot of burden onto me compared to something like OSX or Windows. Every time a customer has a question or an issue that needs fixing, that's time I need to spend that I could be doing other work.

+

With Linux, because of the issues above, I need to spend a lot more time supporting these users: helping out with a11y setup, answering questions, trying to help set up applications or find alternatives. I don't really have an issue with these in isolation; however, when they happen on too frequent a basis, it becomes simply too much of a time sink.

+

I would, of course, be willing to invest time in being an early adopter. However, I simply can't do it when it means that other aspects will suffer. Supporting both GNOME and KDE users would be simply too much of a time investment, and as I have already stated, I cannot support just one of them, since that simply isn't flexible enough.

+

So how do we get to the year of the linux desktop?

+

We need to chip away at these issues. The lowest hanging fruit is Wine. Having good Wine support and integration, in a way that makes using Wine as seamless as running the app would be on Windows, would be one major problem solved. This would single-handedly, overnight, fix the "no average pilot" problem. It of course seems like the largest hurdle, and it's something people have been working on for a long time now. We need at least one distro to fully commit to supporting Wine OOB to the best extent possible, and some tooling to make dependency management a problem of the past.

+

The next stop would be fragmentation. The fragmentation occurring in Linux right now is a very aggravating problem. You have video recording applications that are DE specific. You have dock and output management tools that are DE specific. OSKs that are DE specific. It has become a large enough problem that asking for someone's DE has become a knee-jerk response when they ask "what's the best tool for doing X thing".

+

Accessibility is a large problem on Linux. It's not one that cannot be solved, but it needs a lot of thought put into it, and a lot of flexibility. People underestimate the large variety of needs people have for accessibility: screen readers, magnifying glasses, specialized OSKs, attention management tools, overlays, eye tracking, live captions and dictation, etc. And this is only the tip of the iceberg.

+

Without the appropriate levels of flexibility in how an application interacts with a DE and other applications, it can be very hard to appropriately develop apps that accommodate these needs. I'm not sure how best to fix these issues; it's not something any single company can do. In the end, you either get XDG or the Wayland protocols to support it, or create a new protocol group and try to get the existing DEs to accept it. Good luck with that.

+

The other issue I talked about was polish. This is the most feasible to tackle, as it is something a single group can feasibly do. In fact, I really do have high hopes for S76's Cosmic DE, as they are directly working on this specifically. While you will never escape all application issues, as long as the core DE itself is good, it will be a major improvement.

+

There are other smaller issues, but these are the major ones that prevent me from selling Linux PCs. Some issues, like hardware support, are simply not going to be resolved; but thankfully, in isolation those should be a small minority of problems.

+

So what will I be trying?

+

I do have hope for the future, but I don't think it will come any time soon. I will be testing Pop!_OS with predominantly Flatpak to see how that works out. I think pairing that with a Wine manager that can handle dependencies a bit better than what we currently have could be a really good setup, as S76 seems to be focusing on making a good desktop experience.

+

With Ubuntu, I have had nothing but issue after issue after issue. It's honestly not something I will be investing any more time into trying. For everyone who likes Ubuntu, that's great, but I will not be recommending it at any point going forward.

+

Fedora and its derivatives, particularly Nobara, were at one point on my radar, but recent events in the Fedora community and with RHEL have pushed me away from them. One thing that irked me in particular was the proposal for telemetry that would be on by default at installation time, where they specifically acknowledged that if you give users a real choice, even one they can't skip over, users will opt out. The RHEL team that made the proposal explicitly acknowledged they were hoping users would skip past it, even if it's something they don't want.

+

I have never recommended, and doubt I ever will recommend, an Arch derivative that isn't making a serious effort to become its own OS. To explain what this means: SteamOS is very much its own thing. Valve has put a lot of effort into making sure SteamOS remains a stable, good experience. This is in contrast to operating systems like Manjaro, whose selling point is being an Arch derivative. They still rely greatly on the AUR, and their experience suffers because of it. You often run into bugs that you simply don't experience on other operating systems.

+

Arch itself I do still recommend to people who are explicitly looking to learn, and willing to put in the effort of maintaining their own OS. Arch can be very rewarding for users, even new ones, who are willing to invest in it. That makes it a decent choice for a very specific type of new Linux user.

+ +
+ + + + + + + +
+
+
+
+
+ + + + diff --git a/blog/technicaltermtomfoolery/index.html b/blog/technicaltermtomfoolery/index.html new file mode 100644 index 0000000..77f74c1 --- /dev/null +++ b/blog/technicaltermtomfoolery/index.html @@ -0,0 +1,132 @@ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + +
+

Emulation, Virtualization? An introduction to Technical Term Tomfoolery.

+ + + + + + 4 minute read + + + + + Published: 2023-11-02 + +
+
+

When it comes to virtualization, a lot can be confusing when you start learning, since there are plenty of seemingly contradictory terms. This is primarily due to what I call technical term tomfoolery. This post will serve as an introduction to emulation technologies, as well as to navigating that tomfoolery.

+

I want to preface this by stating that I am in no way a hypervisor or VMM developer. I have hacked on them in the past, but my knowledge here is of the fundamentals and usage of VMMs and hypervisor technologies, not of developing them.

+

The first thing to know when learning computer-related skills is that the real world and the computer science world operate on two separate dictionaries. I'm not sure when this started, but it's been that way for quite a long time. Rather than a history lesson, though, this post will focus primarily on virtualization.

+

So what IS an emulator?

+

Emulators are things which emulate. Simple enough. So what does it mean to emulate? There are a few ways of putting it, but in the end it boils down to "intentionally imitating something".

+

Virtualization

+

To start things off, let's take the classic "it's virtualization, not emulation". This is a fairly common quote, so you may have heard it before. It's the equivalent of saying "it's a Dodge Charger, not a car". Virtualization is merely hardware-accelerated CPU emulation; you are still imitating another computer environment. Using this method, you typically take the CPU instructions the guest kernel issues and run them directly on the host CPU. However, there are instructions and memory addresses that you really don't want a VM to access, and hardware acceleration lets the CPU trap and emulate those instead of passing them straight through, all with very low overhead.
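To make the distinction concrete, QEMU exposes both modes directly. A minimal sketch (the disk image name is hypothetical):

# Pure software emulation: every guest instruction is translated by TCG
qemu-system-x86_64 -accel tcg -m 2048 -drive file=disk.qcow2,if=virtio

# "Virtualization": guest code runs natively on the host CPU via KVM,
# and only the privileged instructions are trapped and emulated
qemu-system-x86_64 -accel kvm -m 2048 -drive file=disk.qcow2,if=virtio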

+

It's also worth noting para-virtualization, an interface that emulates the connections between hardware and other components, or the components themselves. The most common implementation users would actually be aware of is QEMU's virtio devices. QEMU has a plethora of para-virtualized devices as well as fully emulated ones. Using para-virt or "virtio" devices where possible can reduce overhead by a decent chunk: instead of emulating a whole device, you usually provide an emulated interface to a real device or API, though this is not always the case.
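For example, QEMU can present the guest's network card either as fully emulated hardware or as a para-virtualized virtio device. A sketch of the difference, assuming user-mode networking and a hypothetical disk image:

# Fully emulated NIC: the guest drives Intel e1000 hardware registers
qemu-system-x86_64 -accel kvm -m 2048 -drive file=disk.qcow2,if=virtio \
  -netdev user,id=n0 -device e1000,netdev=n0

# Para-virtualized NIC: the guest's virtio driver talks to an interface
# designed for VMs, skipping the costly hardware register emulation
qemu-system-x86_64 -accel kvm -m 2048 -drive file=disk.qcow2,if=virtio \
  -netdev user,id=n0 -device virtio-net-pci,netdev=n0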

+

Wine too?

+

Wine is another classic example of this phenomenon where the technical dictionary and the real-world dictionary are two completely separate things. Wine is actually an acronym for "Wine Is Not an Emulator". As Wine does provide a Windows-like environment when running on Unix, I would absolutely consider it a class of emulation. In fact, the project does address this; I recommend reading Wine's FAQ on the subject, where they put it far more correctly: Wine is "not just an emulator".
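A minimal sketch of that Windows-like environment in action (the installer name is hypothetical; WINEPREFIX just picks where the imitation C: drive lives):

# First run creates a "prefix": a directory tree imitating a Windows install
WINEPREFIX="$HOME/.wine-demo" wine setup.exe
# The imitated C: drive is just a normal directory on the host
ls "$HOME/.wine-demo/drive_c"   # Program Files, windows, users...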

+

Containers as well.

+

Wine and virtualization are not alone in the technical term tomfoolery around emulation. Containerization programs such as LXC and Docker do this too. With virtualization, you are emulating the connection between hardware and kernel; with containers, you are instead emulating the connection between userland and kernel. This is a really interesting technique, since it allows you, in theory, to share all the devices on a machine. Of course, this poses its own issues, for instance when one container runs an application that wants exclusive access to a device while another container wants the same. Without proper mediation you could easily run into race conditions, though modern software stacks are well battle-tested at this point, so it's unlikely to cause an issue unless you go out of your way to make it.

+

Containers do sit in an interesting spot. At what point does a container become a virtualized environment, and at what point does it not? Take LXC: you can run multiple emulated environments with it, so a lot of people consider it virtualization. But by the same token, what about chroot? chroot is, as the name implies, changing the root directory. Many people don't consider it virtualization or emulation, but I personally do. chroot is even rather close to a hypervisor itself: it lets you run an emulated environment while sharing resources with the main one.
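A minimal chroot sketch, assuming an extracted root filesystem at the hypothetical path /srv/rootfs:

# The "guest" keeps the host's kernel, devices, and network;
# only the filesystem root is swapped out
sudo mount --bind /dev  /srv/rootfs/dev   # share host device nodes
sudo mount -t proc proc /srv/rootfs/proc  # give the guest a process view
sudo chroot /srv/rootfs /bin/bash         # enter the emulated environment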

+

Virtualization is just a branch of emulation

+

In the end, virtualization, and containerization if you consider them separate technologies, are both specific techniques of emulation. To say something is "not emulation, it's virtualization" is, IMO, wrong. Like Wine, it would perhaps be better to say "not just emulation". Perhaps that could solve the technical term tomfoolery present in this small segment of the tech world.

+ +
+ + + + + + + +
+
+
+
+
+ + + + diff --git a/blog/waydroid/index.html b/blog/waydroid/index.html new file mode 100644 index 0000000..113e942 --- /dev/null +++ b/blog/waydroid/index.html @@ -0,0 +1,161 @@ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + +
+

Waydroid, What is it, and How does it work?

+ + + + + + 13 minute read + + + + + Published: 2024-01-17 + +
+
+

For those who read my Bliss article, this can be thought of as a sort of companion piece to it. Both Waydroid and Bliss OS are Bliss Labs projects, a sort of family when it comes to Android development, and they share a lot of the same technical aspects, both being generic Android x86 projects. Waydroid is becoming increasingly popular within the Linux community, but it doesn't seem to be well understood. Hopefully this blog will shine some light on the things that make Waydroid unique in the Android ecosystem, without diving too far into technicals.

+

Prelude

+

For those familiar with Collabora's past endeavors, Waydroid builds upon Collabora's SPURV and shares a lot of how that technology works. Those familiar with that project will already have a fairly good understanding of how Waydroid works.

+

So what IS waydroid?

+

Waydroid is widely regarded as an Android emulator for Linux, and if you have read my technical term tomfoolery article, you'll know that I agree with that description. But for the general audience: "It's not emulation, it's containerization". That's right, cringe aside, Waydroid works by running Android natively within an LXC container environment.
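Standing that container up takes only a few commands on a Wayland session; a minimal sketch (packaging and image sources vary by distro):

sudo waydroid init      # fetch the Android image and set up the container
waydroid session start  # start the LXC container for your user
waydroid show-full-ui   # open the full Android UI in a window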

+

This means Waydroid shares almost all of its resources with the host it runs on. Applications run natively on the host kernel without a translation layer like Wine, it directly communicates with hardware like the GPU, networking is set up using native Linux techniques like bridges and taps, RAM is shared, and more.

+

But Waydroid is a bit more than just "running Android in a container", so let's talk about it.

+

Native Integration

+

Many people are probably aware of this already, but Waydroid strives to feel "native" on the system. It supports a native-feeling multi-window mode, a variety of input and interaction methods, and much more. But how does Waydroid accomplish this?

+

First and foremost, Waydroid has a hard requirement on Wayland, hence the name: Wayland + Android = Waydroid. This is how Waydroid achieves the "native" feeling: by directly integrating with the host's Wayland compositor.
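As a practical aside, the multi-window mode mentioned above is a simple property toggle; a sketch using Waydroid's prop command (the session restart is what makes it apply):

waydroid prop set persist.waydroid.multi_windows true  # one host window per app
waydroid session stop && waydroid session start        # restart to apply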

+

The display

+

First we need a quick overview of how the Android display stack works. Without getting too in-depth, there are three main components: the app itself, SurfaceFlinger, and the Hardware Composer. Apps get rendered by SurfaceFlinger onto their own respective layers; this is an OpenGL/Vulkan process. The layers then get sent to the Hardware Composer, which takes the individual layers and arranges them into a sort of sandwich, making sure every layer is in the right position. This layer sandwich then gets sent to the GPU, so the GPU's own composition functions can handle it outside of Vulkan/OpenGL.

+

So where does Waydroid fit into this? It more or less replaces the final step where the Hardware Composer sends the sandwich to the GPU; instead, Waydroid packages it up into a Wayland-compatible window and sends that to the host via the Waydroid socket. Doing it this way means Waydroid should always render windows exactly as Android intended. Waydroid itself still handles compositing the window; we just need to make sure that composited window makes it to the host's compositor intact.

+

Audio

+

Those who have used Waydroid for a while may have noticed that audio is mostly a set-and-forget kind of thing. Those familiar with Linux will feel more or less right at home here: at a low level, Android audio is handled quite similarly to your standard Linux distro. You have a high-level audio server, in Android's case AudioFlinger, which sends audio to a low-level interface for communicating with the hardware. This can be a variety of interfaces like OSS, or in our case, ALSA. Thanks to ALSA being well understood, it is fairly easy to make it write directly to a PulseAudio server, which is exactly what Waydroid does. Because of this, all of Waydroid's audio is mixed within Android itself and then sent to the host.
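For reference, pointing ALSA at a PulseAudio server is a standard alsa-plugins technique. A minimal sketch of the general idea on a desktop, not Waydroid's exact internal configuration:

# Route everything ALSA plays into the Pulse server via the "pulse" plugin
cat > ~/.asoundrc <<'EOF'
pcm.!default { type pulse }
ctl.!default { type pulse }
EOF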

+

This sounds detrimental at first glance, and it does come with its own drawbacks, but there are great benefits too. Again, all Android apps behave normally: if an app wants to control audio, it works just as it would on a regular Android device.

+

It could be possible to implement PipeWire support, or perhaps to modify AudioFlinger so app audio could be controlled directly from the host, but no effort or interest has materialized toward this.

+

So we do trade off some "native integration" here, but app compatibility is excellent and the solution has proven quite robust, albeit perhaps less flexible than some would like, as it does require a PulseAudio implementation on the host. It also has some known latency issues, but those may be configuration-related.

+

Inputs

+

Waydroid's input support is where the Wayland integration really shines. Waydroid directly uses Wayland protocols to capture input into Android: it receives input events like any other Wayland app and forwards them to the Android side. This means integration with Wayland input events is nearly perfect. Mouse, keyboard, multitouch, and even stylus with pressure support all work out of the box. Artistic types rejoice: Ibis Paint, for instance, works perfectly, and any artistic Android app you rely on should have out-of-the-box support for things like pressure sensitivity, tablet buttons, and more.

+

This is sadly a double-edged sword. As I'm sure many readers are aware, Wayland is both great and terrible at the same time. Because of this tight integration, inputs outside of Wayland are... not great. While Waydroid can discover devices via uevent, which is off by default for very good reasons, we have no real method for discovering other devices, nor for locking exclusive use of them. This means peripherals without Wayland support, like controllers, are poorly supported.

+

They do still work with uevent, but uevent works by listening for hotplug events, and Waydroid cannot lock control of these devices. This means a keyboard connected after Waydroid starts will send input through both Wayland and the kernel interface itself, effectively double-inputting everything.
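Device discovery via uevent is itself a property toggle; a sketch, keeping the double-input caveat above in mind:

waydroid prop set persist.waydroid.uevent true  # read hotplugged input devices directly
# then disconnect and reconnect the device (see below)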

+

To get controllers working, you actually need to disconnect the device and reconnect it. Sadly there is no real way around this with Wayland today; we are stuck waiting for an upstream Wayland protocol to support these devices. If you want support for them, consider following the threads in the wayland-protocols repo, or opening a new issue if one doesn't exist.

+

Sensors

+

Sensors are probably the "least integrated" solution, but surprisingly they might just be the most flexible. Waydroid's sensor integration is mostly done through an auxiliary service called waydroid-sensors, which gathers sensor data from the host and sends it to Waydroid over the binder interface, and back to the host if needed. It's a simple solution, but quite robust: it supports things like GPS for location, and gravity sensors and accelerometers work too. Like all things, it can't support everything out of the box, so if you run into issues, make sure to report them.

+

What does waydroid not do?

+

First and foremost, Waydroid does not provide ARM translation layers like libhoudini or libndk, and so long as an open source one doesn't exist, it never will. These are usually license-encumbered and pose licensing issues that Waydroid cannot absorb. Thankfully, it doesn't make much sense for Waydroid to ship them anyway, since the two options that currently exist make sense for different people, so letting users set them up themselves actually works better in most cases.

+

Waydroid doesn't currently ship a curated set of applications. This can be seen as either a pro or a con: by default, Waydroid ships a fairly stock AOSP experience, leaving the user to install their own app suite. This is worth noting, since while many people much prefer this, some would rather have the "full ecosystem". Installing apps is fairly easy using wpm, the Waydroid package manager. There are of course issues with this approach, one being that it only supports F-Droid repos, but considering the nature of Linux as a whole, I don't think that is a major cause for concern.
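Sideloading an APK also works directly through the Waydroid CLI; a quick sketch (the APK filename is hypothetical):

waydroid app install ./F-Droid.apk  # push an APK into the container
waydroid app list                   # confirm it installed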

+

Houston, we have bugs.

+

There are also things Waydroid doesn't do but should. In many cases this is due to bugs or regressions; in others, it's just effort that hasn't yet been made. One of the more requested features that Waydroid actually used to have is camera support. Unfortunately, camera support was lost with the Android 11 migration. There have been talks of ways around this, but in the end nothing concrete has been done.

+

Also on the topic of cameras is libcamera. Waydroid never had libcamera support, but it has been requested in the past. While Android does support libcamera, its infrastructure is still pretty immature, and while there has been some work on getting it going, it has unfortunately stalled.

+

It doesn't stop with cameras, however. Security is an area where Waydroid is definitely lacking as well. While Waydroid isn't "unsafe", not a lot of consideration has gone into making it safe. The container itself runs as root, and while Android does have fairly good internal sandboxing, it could still be possible for a rogue app to gain root permissions by some method, and when that happens, it doesn't matter what container tech you use: root is root, unless it isn't. CONFIG_USER_NS_UNPRIVILEGED can be used to make a "fake rootful" container, but this needs to be paired with an additional permissions system. Without something like SELinux or AppArmor in place, this, while more secure than a root container, is still insecure and could surface security issues elsewhere.
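If you want to check what your own kernel allows here, a quick sketch (config paths vary by distro, and the sysctl shown is a distro patch rather than mainline):

# Does the kernel support user namespaces at all?
zgrep CONFIG_USER_NS /proc/config.gz 2>/dev/null \
  || grep CONFIG_USER_NS "/boot/config-$(uname -r)"
# Debian-style runtime toggle for unprivileged user namespaces
sysctl kernel.unprivileged_userns_clone 2>/dev/null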

+

So why is waydroid so important?

+

Linux mobile users rejoice.

+

This is the part where I sell Waydroid to potential investors and contributors, and I don't think it will be too hard. First, let me address the Linux mobile space. Linux mobile apps suck; there is just no nice way to put it. The design is all over the place. While the freedom is a good thing, there is a significant lack of quality applications with a consistent design and usage paradigm. On Android it's pretty standard: nearly all apps support the back button as an input, and it behaves predictably across the board. You have the standard locations, top left for a menu button (or a back button if a menu isn't needed), swipe from the side to open drawers, and so on. This consistent design is something most people have come to expect; maybe not specifically Android's usage paradigm, but at least it is a paradigm.

+

Android is easier to deploy than ever before.

+

And what about Android users and distributors specifically? Perhaps you need your Android device to last a long time, and you want the flexibility and ease of deploying it anywhere. Waydroid has a very minimal set of dependencies: Python, a somewhat modern kernel with binder enabled, LXC, and Wayland. That is pretty much it for the major dependencies. There are some smaller libraries you need, but those are easy and minimal enough to deploy anywhere. As long as a device can run vanilla Linux, you can port Android to it in a consistent and easy manner. Waydroid is very generic: most of the work done for ARM applies broadly to nearly all ARM devices, and the exact same can be said for x86.
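A quick sanity check for the binder requirement on a candidate device (config paths vary by distro):

# Is binder available, either built in or as binderfs?
zgrep CONFIG_ANDROID_BINDER /proc/config.gz 2>/dev/null \
  || grep CONFIG_ANDROID_BINDER "/boot/config-$(uname -r)"
ls /dev/binderfs 2>/dev/null  # present when binderfs is mounted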

+

There is still some more device-specific stuff. We rely on Mesa for graphics in the generic stack when possible. This means that if your device doesn't support Mesa, it might wind up being a problem, and you may need to fall back to software rendering. It may become possible in the future to use the host's GPU drivers, but currently Waydroid uses its internal Mesa to talk directly to the GPU's kernel driver.

+

Another benefit of this is that devices can be supported much longer than traditional Android manages. Waydroid actively works on devices older than 2011 with good performance and reliability. The entirely open nature of Waydroid's AOSP stack means future upgrades take extremely little effort since, as I said before, nearly all the work is shared across devices. If you are a company working on custom Android solutions, working with Waydroid can make supporting your legacy devices and porting to new ones much easier.

+

For independent contributors who just like FOSS software, getting Android working on your custom ARM box or your little x86 NUC is often as simple as making sure it can run FOSS software. You can buy just about any SBC with good Mesa support, install Linux on it, have Waydroid up and running in short order, and enjoy full, consistent access to Android's vast library of useful applications.

+

Games

+

It's hard to talk about Waydroid without talking about games. As one would expect, Android games are quite popular, and Waydroid can boast good support for the majority of them. Sadly, there are games out there that will not work even with a lot of configuration. Some games are ARM-only; while many of these work once you load up libhoudini or libndk, there are still plenty that actively block solutions like Waydroid. There are also games that require specific features that don't work well with Mesa; some actively require features commonly found only in the GPUs and drivers of Android devices. While this is fairly rare, it can certainly still happen.

+

There are also games that require you to spoof the device you are using. Waydroid can play these, but you will need a script to set up the spoofing for these applications, and while it's a hassle, it does work. Another configuration issue you may run into is mouse versus touch. Some games require you to set up touch emulation, since they won't respond to a mouse. Waydroid has touch emulation baked in, however, so this is a fairly easy config setting to change.
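That touch emulation setting is also a property; a sketch (the package name here is just an example):

# Deliver mouse clicks to this app as touch events
waydroid prop set persist.waydroid.fake_touch "com.example.game"
# Or apply it to every app
waydroid prop set persist.waydroid.fake_touch '*'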

+

Even with all this said, many users will find that more often than not, games just work once you have houdini or ndk installed. You can load up Google Play Games or the Aurora Store and have a large library of games at your disposal. Gaming is a fairly popular use for Waydroid, and any contributions to make it better, whether to Waydroid itself or by integrating and contributing to open source tools like XtMapper, will go a long way and be greatly appreciated by many users.

+ +
+ + + + + + + +
+
+
+
+
+ + + + diff --git a/blog/whynotusejxl/index.html b/blog/whynotusejxl/index.html new file mode 100644 index 0000000..302e825 --- /dev/null +++ b/blog/whynotusejxl/index.html @@ -0,0 +1,184 @@ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + +
+

Why aren't we using JXL?

+ + + + + + 8 minute read + + + + + Published: 2024-01-14 + +
+
+

This is a mini part two in something I didn't think was going to become a series, but I wanted to test this specifically anyway. So here we go.

+

I downloaded 20k images from danbooru.

+

I decided to do a real-world test case for JXL. I haven't seen many of these done. I've seen a lot of smaller-scale tests showing that sometimes JXL does great and sometimes not so much, but I haven't really seen anything indicative of how JXL can actually benefit service providers.

+

Firstly, why 20k images? I decided this would give a pretty good representation without being a complete dick about it. The 20k or so images (a few fewer due to errors) came out to 34.69 GiB total.

+

It's really not good etiquette to hammer a service like this, and for any more images, say 60k, I would rather get an OK from someone first, since I had already downloaded ~34 GiB of data over a couple of minutes. I decided roughly 20k was enough images. Throttling my download speed would have helped too, but I would still rather show good etiquette.

+

So how did I do this? It's really simple to replicate. Below is a script you can use to test this out for yourself.

+
#!/bin/bash
+## Tools needed:
+## imageboard_downloader https://github.com/FerrahWolfeh/imageboard-downloader-rs
+### If cargo is installed, compile and install with cargo install --git https://github.com/FerrahWolfeh/imageboard-downloader-rs
+## GNU Parallel https://www.gnu.org/software/parallel/
+
+mkdir -p jpg
+mkdir -p jpg-jxl
+mkdir -p png
+mkdir -p png-jxl
+
+## Download Images
+## "-animated" to remove gifs and videos
+
+imageboard_downloader search -i danbooru -d 10 -l 20000 -o ./dls --id -- "-animated"
+
+mv dls/*.jpg jpg/
+mv dls/*.jpeg jpg/
+mv dls/*.png png/
+
+## Transcode; adjust the parallel job count (-j) to suit your CPU and RAM
+ls png/ | parallel -j 6 cjxl -e 4 -d 0 --num_threads 1 png/{} png-jxl/{.}.jxl
+ls jpg/ | parallel -j 8 cjxl -e 6 -d 0 --num_threads 1 jpg/{} jpg-jxl/{.}.jxl
+
+

A nice, simple script. We don't want to download any animated pictures like GIFs, or any videos, since they don't help with what we're testing. I use -e 4 for encoding the PNGs because I want my Ryzen 2600 to have a fighting chance of encoding these at reasonable speed; I also only have 16 GiB of RAM and need to use my PC for other things at the same time. You can get better results than I did by increasing the effort, but for lossless I think -e 4 is a good benchmark.

+

When encoding the JPEG images we can afford a much higher effort, since by default cjxl uses lossless JPEG transcoding, which is extremely fast and efficient. Beyond -e 6, however, the gains are minimal at best, so it's best to just leave it there IMO. So, how did we come out? Below is the before and after of the roughly 20k images.

+
  booru-test dua png jpg   
+  15.69 GiB png
+  18.97 GiB jpg
+  34.66 GiB total
+  booru-test dua png-jxl jpg-jxl
+   9.63 GiB png-jxl
+  15.10 GiB jpg-jxl
+  24.73 GiB total
+
+
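Since the JPEG path is a lossless transcode rather than a re-encode, it is fully reversible. A quick sketch of verifying that round trip on a single file (hypothetical filenames):

cjxl input.jpg input.jxl      # lossless JPEG transcode (cjxl's default)
djxl input.jxl roundtrip.jpg  # reconstructs the original JPEG bitstream
cmp input.jpg roundtrip.jpg && echo "bit-exact"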

It's also important to check how many errors we ran into. Not every file can always be decoded, and sometimes the encoder errors out, so we can quickly compare the file counts. And yup, not enough errors to be a significant issue.

+
  booru-test ls png-jxl | wc -l && ls png | wc -l                                         
+4774
+4776
+  booru-test ls jpg-jxl | wc -l && ls jpg | wc -l
+14559
+14560
+
+

These may not be the most impressive numbers we have ever seen. PNG clearly gave us great size savings, while JPG gave us less: PNG saved around 39% of file size at -e 4, and JPG at -e 6 saved about 20%. This is rather more impressive when you realize we lost pretty much nothing in exchange (besides browser support, which is the topic of this post). We still have fast decode times, we still have progressive decoding (unlike some other image formats), and this is entirely lossless: not a single image (except those that failed to decode for one reason or another) was a lossy encode.

+

Even judging just the JPEG benefits: if you are a provider, this is a 20% file size and bandwidth saving. 20% may not sound like a lot, but it is quite the saving for many services. For services that self-host with a bandwidth cap, that's 20% less traffic clogging your pipes, a direct benefit to the speed your users experience. And if you pay for bandwidth, well, it speaks for itself.

+

Well that's neat. Why aren't we using this?

+

This is a great question. You may have noticed I didn't even touch on lossy JXL. Why? In my opinion, lossless JXL alone is a large enough benefit to warrant inclusion in the modern browser landscape. We don't need any flawed metrics to compare visual fidelity; these are 1:1 comparisons with no data loss.

+

However, the team working on Chromium performed a massively flawed test in which they compared AVIF to JXL using a (at the time) massively out-of-date libjxl, where AVIF got better decode performance. When comparing image fidelity, they chose to showcase MS-SSIM, a metric that regularly favoured AVIF over JXL, while the other metrics they had often came out in JXL's favour, or were more balanced, weighing JXL and AVIF similarly.

+

They also compared single-threaded and eight-threaded workloads, but nothing in between.

+

Despite this flawed testing methodology, Apple has integrated JXL support into their products, and a significant amount of internet traffic now advertises JXL support. The following is from Jon Sneyers on the JXL Discord:

+
+

November 21: 13.92% of Cloudinary image requests came from a user agent supporting jxl (2.8 billion out of 20.1 billion images served that day)

+
+
+

December 5: 19.52% of Cloudinary image requests came from a user agent supporting jxl (3.1 billion out of 15.7 billion images served that day)

+
+

As we can see, nearly 20% of their traffic now advertises JXL support. This is roughly in line with expectations, as gs.statcounter reports iOS accounting for around 28% of total mobile market share. With how significant mobile internet usage is on the global scale now, this means JXL-compatible devices account for a significant amount of market share.

+

Recently a small chart showing JXL-enabled traffic from Cloudinary was even shared with us; you can see it below, where it currently sits at about 20% of traffic. Shopify actively serves JXL to supporting clients; if they have similar statistics, that would mean about 20% of their mobile traffic gets JXL images.

+

cloudinary chart

+

For Android devices, we at least have freedom: we can install a JXL-compatible browser and a JXL-compatible gallery app. However, that only goes as far as those specific apps; it is not platform support, sadly. Custom ROMs may implement JXL support, but well... that's still just custom ROMs.

+

In the end, Chrome and Android support are the final barriers. Firefox... well, does anyone actually care what they do anymore? Once Chrome and Android support JXL, the vast majority of applications the masses use will support it. How can you get involved? It's easy: ask on the forums for your devices for JXL support, and if you find webpages that use JXL, like my previous entry, you can report those pages as broken to Chrome.

+

Why not talk about lossy JXL?

+

I am very hesitant to talk about lossy JXL. First and foremost, for those who really want to know, let me show you the results of running cjxl -e 6 -d 0.5 on the PNG folder. The file sizes are below.

+
booru-test dua png png-jxl-loss 
+   3.02 GiB png-jxl-loss
+  15.69 GiB png
+  18.71 GiB total
+
+

Now, why don't I want to talk about lossy? I don't want to get caught in the inevitable trap of talking about metrics. Metrics are an intensely debated topic, and I could talk up one metric or talk down another and always upset someone. Each metric has its flaws, and quite honestly I don't want to spend a week testing every single metric I can find. -d 0.5 is a distance considered "visually lossless" by many, and this is the only reason I am showcasing it here: it is fairly uncontroversial. I can't tell the difference, and this is how I encode all of my pictures: camera shots, downloaded pictures, comics, manga. Doesn't matter; if I don't need lossless, I save it with -d 0.5. I typically bump the effort to -e 8 for my own content, but -e 6 might be a safer estimate of what people would actually use when running a service. As you can see, JXL is smaller; there really isn't any surprise here. You get your progressive decoding for web delivery, the benefits I talked about in the other article, and so on.

+

Without running a comprehensive comparison against AVIF, WebP, and JPEG, IMO there really isn't any point in talking about lossy JXL, and I believe the merits of lossless JXL are more than enough to prove JPEG XL's worth in the modern market.

+ +
+ + + + + + + +
+
+
+
+
+ + + + diff --git a/categories/index.html b/categories/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/categories/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/categories/information/index.html b/categories/information/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/categories/information/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/categories/information/page/1/index.html b/categories/information/page/1/index.html new file mode 100644 index 0000000..3d30e8e --- /dev/null +++ b/categories/information/page/1/index.html @@ -0,0 +1,6 @@ + + + + +Redirect +

Click here to be redirected.

diff --git a/categories/tech-showcase/index.html b/categories/tech-showcase/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/categories/tech-showcase/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/categories/tech-showcase/page/1/index.html b/categories/tech-showcase/page/1/index.html new file mode 100644 index 0000000..53cf8b0 --- /dev/null +++ b/categories/tech-showcase/page/1/index.html @@ -0,0 +1,6 @@ + + + + +Redirect +

Click here to be redirected.

diff --git a/categories/tidbits/index.html b/categories/tidbits/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/categories/tidbits/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/categories/tidbits/page/1/index.html b/categories/tidbits/page/1/index.html new file mode 100644 index 0000000..1336dcb --- /dev/null +++ b/categories/tidbits/page/1/index.html @@ -0,0 +1,6 @@ + + + + +Redirect +

Click here to be redirected.

diff --git a/categories/tutorial/index.html b/categories/tutorial/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/categories/tutorial/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/categories/tutorial/page/1/index.html b/categories/tutorial/page/1/index.html new file mode 100644 index 0000000..760efd7 --- /dev/null +++ b/categories/tutorial/page/1/index.html @@ -0,0 +1,6 @@ + + + + +Redirect +

Click here to be redirected.

diff --git a/index.html b/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/page/1/index.html b/page/1/index.html new file mode 100644 index 0000000..66b7e2b --- /dev/null +++ b/page/1/index.html @@ -0,0 +1,6 @@ + + + + +Redirect +

Click here to be redirected.

diff --git a/robots.txt b/robots.txt new file mode 100644 index 0000000..14c4416 --- /dev/null +++ b/robots.txt @@ -0,0 +1,4 @@ +User-agent: * +Disallow: +Allow: / +Sitemap: https://quackdoc.github.io/sitemap.xml diff --git a/signature.html b/signature.html new file mode 100644 index 0000000..bfa5600 --- /dev/null +++ b/signature.html @@ -0,0 +1,4 @@ +

Links to my other stuff
+ Github
+ Mastodon

+ \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 0000000..bda1142 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,108 @@ + + + + https://quackdoc.github.io/ + + + https://quackdoc.github.io/authors/ + + + https://quackdoc.github.io/blog/ + + + https://quackdoc.github.io/blog/clivideo/ + 2023-11-02 + + + https://quackdoc.github.io/blog/hidden-jxl-benefits/ + 2023-11-20 + + + https://quackdoc.github.io/blog/page/1/ + + + https://quackdoc.github.io/blog/page/2/ + + + https://quackdoc.github.io/blog/persitevpn/ + 2023-11-04 + + + https://quackdoc.github.io/blog/repairmanstips/ + 2023-11-02 + + + https://quackdoc.github.io/blog/scrcpycamera/ + 2023-11-02 + + + https://quackdoc.github.io/blog/sellinglinux/ + 2023-11-02 + + + https://quackdoc.github.io/blog/technicaltermtomfoolery/ + 2023-11-02 + + + https://quackdoc.github.io/blog/waydroid/ + 2024-01-17 + + + https://quackdoc.github.io/blog/whynotusejxl/ + 2024-01-14 + + + https://quackdoc.github.io/categories/ + + + https://quackdoc.github.io/categories/information/ + + + https://quackdoc.github.io/categories/information/page/1/ + + + https://quackdoc.github.io/categories/tech-showcase/ + + + https://quackdoc.github.io/categories/tech-showcase/page/1/ + + + https://quackdoc.github.io/categories/tidbits/ + + + https://quackdoc.github.io/categories/tidbits/page/1/ + + + https://quackdoc.github.io/categories/tutorial/ + + + https://quackdoc.github.io/categories/tutorial/page/1/ + + + https://quackdoc.github.io/tags/ + + + https://quackdoc.github.io/tags/cli/ + + + https://quackdoc.github.io/tags/emulation/ + + + https://quackdoc.github.io/tags/images/ + + + https://quackdoc.github.io/tags/linux/ + + + https://quackdoc.github.io/tags/video/ + + + https://quackdoc.github.io/tags/virtualization/ + + + https://quackdoc.github.io/tags/vpn/ + + + https://quackdoc.github.io/tags/windows/ + + diff --git a/tags/cli/index.html b/tags/cli/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/tags/cli/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/tags/emulation/index.html b/tags/emulation/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/tags/emulation/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/tags/images/index.html b/tags/images/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/tags/images/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/tags/index.html b/tags/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/tags/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/tags/linux/index.html b/tags/linux/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/tags/linux/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/tags/video/index.html b/tags/video/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/tags/video/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/tags/virtualization/index.html b/tags/virtualization/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/tags/virtualization/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/tags/vpn/index.html b/tags/vpn/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/tags/vpn/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + + diff --git a/tags/windows/index.html b/tags/windows/index.html new file mode 100644 index 0000000..1dd6572 --- /dev/null +++ b/tags/windows/index.html @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+

Nothing here right now. Check the blog for content

+ +
+
+
+
+
+ + + +