From 5af7f40fd23905543ba9dc2f7b4a7714a8dda6bd Mon Sep 17 00:00:00 2001 From: bfahrenfort Date: Sat, 31 Aug 2024 16:30:42 +1000 Subject: [PATCH] Quartz sync: Aug 31, 2024, 4:30 PM --- content/Essays/ai-infringement.md | 30 ++++++---- content/Essays/no-ai-fraud-act.md | 12 ++-- content/Essays/on-linux.md | 12 +++- content/Programs I Like/rust-macros.md | 8 +-- content/Projects/latex.md | 83 ++++++++++++++++++++++++++ content/Projects/my-computer.md | 17 ++++-- content/Projects/rsgistry.md | 8 +-- content/Updates/2024/aug.md | 21 +++++++ content/curated.md | 4 +- content/index.md | 2 +- quartz.config.ts | 7 ++- quartz/styles/custom.scss | 5 ++ 12 files changed, 171 insertions(+), 38 deletions(-) create mode 100644 content/Projects/latex.md create mode 100644 content/Updates/2024/aug.md diff --git a/content/Essays/ai-infringement.md b/content/Essays/ai-infringement.md index e7ba108a1..234f6e49a 100755 --- a/content/Essays/ai-infringement.md +++ b/content/Essays/ai-infringement.md @@ -65,18 +65,19 @@ Searle's exercise was at the time an extension of the Turing test. He designed i Generative AI models from different sources are architected in a variety of different ways, but they all boil down to one abstract process: tuning an absurdly massive number of parameters to the exact values that produce the most desirable output. (note: [CGP Grey's video on AI](https://www.youtube.com/watch?v=R9OHn5ZF4Uo) and its follow-up are mainly directed towards neural networks, but do apply to LLMs, and do a great job illustrating this). This process requires a gargantuan stream of data to use to calibrate those parameters and then test the model. How it parses that incoming data suggests that, even if the method of acquisition is disregarded, the AI model still infringes the input. #### The Actual Tech -At the risk of bleeding the [[#Generation]] section into this one, generative AI is effectively a very sophisticated next-word predictor based on the words it has read and written previously. +At the risk of bleeding the [[#Generation]] section into this one, generative AI is effectively a very sophisticated next-word predictor based on the words it has read and written previously. -First, this training is deterministic. It's a pure, one-way, data-to-model transformation (one part of the process for which "transformer models" are named). The words are ingested and converted into one of various types of formal representations to comprise the model. It's important to remember that given a specific work and a step of the training process, it's always possible to calculate by hand the resulting state of the model after training on that work. The "black box" that's often discussed in connection with AI refers to the final state of the model, when it's no longer possible to tell what effect of certain portions of the training data have had on the model. +First, this training is deterministic. It's a pure, one-way, data-to-model transformation (one part of the process for which "transformer models" are named). The words are ingested and converted into one of various types of formal representations to comprise the model. It's important to remember that given a specific work and a step of the training process, it's always possible to calculate by hand the resulting state of the model after training on that work. The "black box" that's often discussed in connection with AI refers to the final state of the model, when it's no longer possible to tell what effects the data ingested at earlier steps had on the model. 
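+
+To make that concrete, here's a minimal sketch of the idea—hypothetical toy Python, not any real model's code—showing a deterministic "training step" for the simplest possible next-word predictor, one that just counts which word follows which:
+
+```python
+from collections import Counter, defaultdict
+
+def train(text: str) -> defaultdict:
+    """One pure, one-way pass over the input: count next-word frequencies."""
+    model = defaultdict(Counter)
+    words = text.lower().split()
+    for current, following in zip(words, words[1:]):
+        model[current][following] += 1  # identical input always yields identical counts
+    return model
+
+def most_correct_next(model: defaultdict, word: str) -> str:
+    """The association seen most often in training is the most 'correct' one."""
+    return model[word].most_common(1)[0][0]
+
+model = train("the cat sat on the mat because the cat was tired")
+print(most_correct_next(model, "the"))  # 'cat' — determined entirely by the training text
+```
+
+A real transformer tunes billions of continuous weights rather than integer counts, but the property is the same: given the input and the step, you could work out the resulting model state by hand.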
-If some words are more frequently associated together, then that association is more "correct" to generate in a given scenario than other options. And the only data to determine whether an association *is* correct would be that training input. This means that an AI trains only on the words as they are on the page. Training doesn't have some external indicator of semantics that a secondary natural-language processor on the generation side can incorporate. Training thus can't be analogized to human learning processes, because **when an AI trains by "reading" something, it isn't reading for the *forest*—it's reading for the *trees***. Idea and expression are meaningless distinctions to AI. +In the model, if some words are more frequently associated together, then that association is more "correct" to generate in a given scenario than other options. A parameter called "temperature" determines how far the model will stray from the correct next word. And the only data to determine whether an association *is* correct would be that training input. This means that an AI trains only on the words as they are on the page. Training doesn't have some external indicator of semantics that a secondary natural-language processor on the generation side can incorporate. Training thus can't be analogized to human learning processes, because **when an AI trains by "reading" something, it isn't reading for the *forest*—it's reading for the *trees***. Idea and expression are meaningless distinctions to AI. As such, modern generative AI, like the statistical data models and machine learners before it, is a Weak AI. And weak AIs use weak AI data. Here's how that translates to copyright. -- Sidebar: this point doesn't consider an AI's ability to summarize a work since the section focuses on how the *training* inputs are used rather than how the output is generated from real input. This is why I didn't want to get into generation in this section. It's confusing, but training and generation are merely linked concepts rather than direct results of each other when talking about machine learning. Especially when you introduce concepts like "temperature", which is a degree of randomness added to a model's (already variant) choices in response to an user in order to simulate creativity. +- Sidebar: this point doesn't consider an AI's ability to summarize a work since the section focuses on how the *training* inputs are used rather than how the output is generated from real input. This is why I didn't want to get into generation in this section. It's confusing, but training and generation are merely linked concepts rather than direct results of each other when talking about machine learning. Especially when you introduce concepts like temperature in order to simulate creativity. - ...I'll talk about that in the next section. #### "The Law Part" -All of the content of this section has been to establish how an AI receives data so that I can reason about how it *stores* that data. In copyright, reproduction, derivatives or compilations of works without authorization can constitute infringement. I believe that inputting a work into a generative AI creates a derivative representation of the work. Eventually, the model is effectively a compilation of all works passed in. And finally (on a related topic), there is nothing copyrightable in how it's arranged the works in that compilation even if every work trained on is authorized. 
-- Sidebar: fair use analysis for both training and generation is located in the [[#Fair Use|Policy: Fair Use]] section.
+All of the content of this section has been to establish how an AI receives data so that I can reason about how it *stores* that data. Everything about training except fair use is covered in this section; the fair use analysis for both training and generation is located in [[#Fair Use|Policy: Fair Use]].
+
+In copyright, unauthorized reproduction, derivatives, or compilations of works can each constitute infringement. And I believe that inputting a work into a generative AI creates a derivative representation of the work. Eventually, the model is effectively a compilation of all works passed in. Finally—on a related topic—there is nothing copyrightable in how the model has arranged the works in that compilation, even if every work trained on is authorized.

Recall that training on a work incorporates its facts and the way the author expressed those facts into the model. When the training process takes a model and extracts weights on the words within, it's first reproducing copyrightable expression, and then creating something directly from the expression. You can analogize the model at this point to a translation (a [specifically recognized](https://www.law.cornell.edu/uscode/text/17/101#:~:text=preexisting%20works%2C%20such%20as%20a%20translation) type of derivative) into a language the AI can understand. But where a normal translation would be copyrightable (if authorized) because the human translating a work has to make expressive choices and no two translations are exactly equal, an AI's model would not be. A given AI will always produce the same translation for a work it's been given, it's not a creative process. Even if every work trained on expressly authorized training, I don't think the resulting AI model would be copyrightable. And absent authorization, it's infringement.
@@ -135,7 +136,16 @@ These arguments will be more or less persuasive to different people. I think the
> More topics under this section forthcoming! I work and edit in an alternate document and copy over sections as I finish them.

## Fair Use
-WIP
+Whenever a legal doctrine has strong roots in collective consciousness and policy, there's an epistemological question about how to approach the issue. The debate asks: in the abstract, should the courts protect what *descriptively is* considered within the bounds of protection, or what *ought to be* recognized by society as deserving protection?
+- Nerd sidebar: This debate is common in criminal law. For example, examine the reasonable expectation of privacy. *Are* members of the public actually concerned with police access to the data on their phone or do they think they have nothing to hide? *Should* they be? Recent cases on searches and third party access trend towards analysis under the latter, more paternalistic position.
+
+In fair use, the first ("empirical") perspective teaches that fair use should only extend to concepts analogous to prior enforcement which has been accepted in the collective consciousness. In contrast, the second ("normative") perspective would disregard comparison with enforcement in favor of comparison with societal values.
+
+Because it's such an alien technology to the law, I'd argue that generative AI's fair use should be analyzed in view of the normative approach. But even under that approach, I don't think AI training or generation should be considered fair use.
+
+With respect to training, ==a==
+
+Turning to generation, ==b==

## Who's holding the bag? 
WIP
https://www.wsj.com/tech/ai/the-ai-industry-is-steaming-toward-a-legal-iceberg-5d9a6ac1?st=5rjze6ic54rocro&reflink=desktopwebshare_permalink
@@ -144,13 +154,13 @@ Well, here it is once more. There's strangely an inverse relationship between fa
## The First Amendment and the "Right to Read"
This argument favors allowing GAI to train on the entire corpus of the internet, copyright- and attribution-free, and bootstraps GAI output into being lawful as well. The position most commonly taken is that the First Amendment protects a citizen's right to information, and that there should be an analogous right for generative AI.

-The right to read, at least in spirit, is still being enforced today. Even the 5th Circuit (!!!) believes that this particular flavor of First Amendment claim will be likely to succeed on appeal after prevailing at the trial level. [*Book People v. Wong*](https://law.justia.com/cases/federal/appellate-courts/ca5/23-50668/23-50668-2024-01-17.html), No. 23-50668 (5th Cir. 2024) (not an AI case). It also incorporates principles from intellectual property law. Notably, that you can read the content of a work without diminishing the value of the author's expression (i.e. ideas aren't copyrightable). As such, the output of an AI is not taking anything from an author that a human wouldn't take when writing something based on their knowledge.
+The right to read, at least in spirit, is still being enforced today. Even the 5th Circuit (!!!) believes that this particular flavor of First Amendment claim is likely to succeed on appeal after prevailing at the trial level. [*Book People v. Wong*](https://law.justia.com/cases/federal/appellate-courts/ca5/23-50668/23-50668-2024-01-17.html), No. 23-50668 (5th Cir. 2024) (not an AI case). It also incorporates principles from intellectual property law. Notably, this argument states that one can read the content of a work without diminishing the value of the author's expression (*i.e.*, ideas aren't copyrightable). As such, the output of an AI is not taking anything from an author that a human wouldn't take when writing something based on their knowledge.

I take issue with the argument on two points that stem from the same technological foundation.

-First, as a policy point, the argument incorrectly humanizes current generative AI. There are no characteristics of current GAI that would warrant the analogy between a human reading a webpage and an AI training on that webpage.
+First, as a policy point, the argument incorrectly humanizes current generative AI. There are no characteristics of current GAI that would warrant the analogy between a human reading a webpage and an AI training on that webpage. Even emerging tools like the improperly named [Deep Document Understanding](https://github.com/infiniflow/ragflow/blob/main/deepdoc/README.md)—which claim to ingest documents "as \[a\] human being"—are just classifiers on stochastic data at the technical level, and are not actual "understanding."

-Second and more technically, [[#Training|the training section]] above is my case for why an AI does not learn in the same way that a human does in the eyes of copyright law. ==more==
+Second, and more technically, [[#Training|the training section]] above is my case for why an AI does not learn in the same way that a human does in the eyes of copyright law. ==more==

But for both of these points, I can see where the confusion comes from. 
The previous leap in machine learning was called "neural networks", which definitely evokes a feeling that it has something to do with the human brain. Even more so when the techniques from neural network learners are used extensively in transformer models (that's those absurd numbers of parameters mentioned earlier). ## Mini-arguments diff --git a/content/Essays/no-ai-fraud-act.md b/content/Essays/no-ai-fraud-act.md index 0c069ba58..dabbee66d 100755 --- a/content/Essays/no-ai-fraud-act.md +++ b/content/Essays/no-ai-fraud-act.md @@ -6,7 +6,7 @@ tags: - ai date: 2024-01-24 draft: false -lastmod: 2024-07-28 +lastmod: 2024-08-29 --- > [!info] **Update** > Since publication, GPT-4o was released with controversy over a celebrity voice being potentially appropriated. [[#Detour Her|-> Jump to: Her]] @@ -99,7 +99,7 @@ Two days before the ChatGPT 4.0 demo was released, Mr. Altman contacted my agent > In a time when we are all grappling with deepfakes and the protection of our own likeness, our own work, our own identities, I believe these are questions that deserve absolute clarity. I look forward to resolution in the form of transparency and the passage of appropriate legislation to help ensure that individual rights are protected. " > [!warning] -> This statement leaves out a detail. Sky was released in September (the month that OpenAI reached out to Scarlett), but gained widespread public attention in the May announcement. h/t [Timothy Lee](https://www.understandingai.org/p/johanssons-beef-with-openai-points) +> This statement leaves out a detail. Sky was released in September 2023 (the month that OpenAI reached out to Scarlett), but gained widespread public attention in the May 2024 announcement. h/t [Timothy Lee](https://www.understandingai.org/p/johanssons-beef-with-openai-points) > > I don't think it makes much difference; the likely eventuality is that both times Scarlett was contacted before a publicity event that would feature a model inspired by (and possibly cloned from) her voice. Additionally, given tech bro culture ("go fast break shit"), I think both times the AI model had already been trained on her voice and presented to Altman in a finished state. @@ -118,7 +118,7 @@ This language is from trademark cases, and is tied to a First Amendment side of Of the two fair use factors in this balance, relevance feels more at-home when talking about AI use. If you're just throwing an unauthorized AI-generated picture of Prince into your work because it looked good (was that a Warhol reference?), is it really a choice worth protecting? This factor will ensure that if you're risking the harms of making someone say/appear on something they didn't approve, you've at least got a good reason for it. If taken to the extreme, it could require the use of AI to be strictly necessary to the message of your replica. I doubt that extreme would happen, but if it did, then the EFF's treatment of this section as a First Amendment issue would make more sense, since that would absolutely guarantee the AI use in that specific case's status as protected expression. However, factors like this are almost never interpreted to their limits, as courts would rather balance everything than cabin one factor and label it dispositive. Again, factors are squishy. -If I had been asked to adapt fair use to publicity rights, I would have probably also brought in unfair-competition law by including the market effect factor. 
There are a lot of cases in competition that cover parties whose content doesn't fit traditional utility-or-authorship models (like the news as news), and the mature principles there would be some less shaky ground for interpretation compared to the rest of this bill. If that were the case, this balance might be considered the primary part of this bill as opposed to the offenses (like how fair use is the headline issue in copyright actions). +If I had been asked to adapt fair use to publicity rights, I would have probably also brought in unfair-competition law by including the market effect factor. There are a lot of cases in competition that cover parties whose content doesn't fit traditional utility-or-authorship models (like the news as news), and the mature principles of that doctrine would provide a stable foundation for interpretation. If that were the case, this balance might be considered the primary part of this bill (like how fair use is the headline issue in copyright actions). Instead, the offenses, not the defenses, take the spotlight. - Sidebar: the Johansson controversy discussed earlier is a good example of where this factor would apply. When OpenAI was unable to procure the celebrity they wanted through the free market, they appropriated her voice, which harms her own ability to monetize her voice the way she chooses. I definitely wouldn't have included the second fair use factor, though. @@ -137,7 +137,7 @@ As an IP right, the Act in its current form is Congressionally excepted from Sec Most of the arguments against the Act's Section 230 exception assume that the Act is indeed too broad. To proponents of that view, this means websites would now be subject to that broad liability. As explained above, the bill isn't as broad as made out to be. Let's analyze the incentives present in an application of the Act consistent with its purposes. -**Hypothetical:** Under the Act, Elon Musk has a cause of action against YouTube, because many hacked YouTube channels end up hosting 24-hour livestreams of his AI-generated voice and likeness pushing cryptocurrency scams and the like. YouTube tries to moderate harmful content wholesale, so would likely be immune from suit if Section 230 applied. +**Hypothetical:** Under the Act, Elon Musk has a cause of action against YouTube, because many hacked YouTube channels end up hosting 24-hour livestreams of his AI-generated voice and likeness pushing cryptocurrency scams and the like. YouTube tries to moderate harmful content wholesale, so would likely be immune from suit if Section 230 applied. *Should* it apply? **Answer**: in such a scenario, the interests probably weigh in favor of a Section 230 exception which would allow Elon to file suit. - YouTube has no strong financial incentive to remove content: @@ -151,7 +151,7 @@ And the final nail in the coffin for immunity is precisely that lack of action i In March 2024, YouTube implemented creator-added Synthetic Content labels as well as automatic, mandatory labeling for content created with their own AI tools. It can **only** be added on a computer on the web version of YT Studio. Their initially mentioned dispute tools are absent, though we have no way of knowing whether those are implemented. -Now look at Apple, who [*still* has no AI policy for the App Store](https://developer.apple.com/app-store/review/guidelines/). Or Google, whose solution is [user labor](https://techcrunch.com/2023/10/25/google-plays-policy-update-cracks-down-on-offensive-ai-apps-disruptive-notifications/). 
+Now look at Apple, who [*still* has no AI policy for the App Store](https://developer.apple.com/app-store/review/guidelines/) ([and actively profits from harm done by AI](https://www.404media.co/apples-huge-dual-use-face-swap-app-problem-is-not-going-away/)). Or Google, whose solution is [user labor](https://techcrunch.com/2023/10/25/google-plays-policy-update-cracks-down-on-offensive-ai-apps-disruptive-notifications/). The simple truth is that a platform will not properly enforce any standard if not compelled to. Section 230 was ultimately a net positive because it was a nice carrot to goad platforms into enforcing some ground rules, with a tantalizing shield from liability if they did so. The (rightful) fear then was that if held liable regardless of moderation policy, then platforms would not spend a dime on moderation. That is, if even one post slipped through and harmed someone, the lawsuit would hit just as hard regardless of the effort expended to police the platform. The articles fail to mention this important historical fact when they raise the specter of Section 230, despite the credibility it would lend to their points. Yet this historical balance does not apply to AI. - More on the incentives pre-Section 230: having zero involvement actually enables websites to make an argument (albeit weakly) that they have *no* control over their users' content and should be absolved of liability. Any moderation at all obviously defeats this argument. This underscored the historical need for an incentive. However, this *still* doesn't apply to AI because we can guarantee that platforms already implement moderation (unless the nightmare outcome of repealing Section 230 happens). @@ -177,7 +177,7 @@ Notwithstanding the impossibility of a Congressional act canning the First Amend With respect to arguments (not seriously advanced by the articles in issue here) that the statute as a whole is an unconstitutional, note that this is not a legal justification essay. If I was going to evaluate the scrutiny arguments and precedent for whether this specific statute would be constitutional facially or as-applied to specific AI and non-AI examples, I'd submit it as a student note to law journals. I will point out, though, that many other categories of speech are given lesser or no protection just by nature of their effect or content (slander because it harms the subject, non-artistic obscenity, etc). My point is that such a hypothetical challenge would be a closer case than others in this space have portrayed it. -Finally, if the content of someone's AI-generated image/audio/whatever is truly for a 1st amendment protected purpose, nothing stops them from just...making their criticism/opinion without digital depictions. For example, I don't think there's some expressive meaning to making Biden say "I'm senile" that just stating your opinion yourself wouldn't convey (unlike images on a t-shirt, which *were* integral to the point that the student was trying to make in [*Guiles v. Marineau*](https://caselaw.findlaw.com/court/us-2nd-circuit/1101375.html) \[[shirt in question](https://www.thenation.com/wp-content/uploads/2016/02/Scott_Censorship_AP_crop_img-680x430.jpg)\]). Nonetheless, I think reasonable minds could differ on this point, so feel free to let me know your take. +Finally, if the content of someone's AI-generated image/audio/whatever is truly for a 1st amendment protected purpose, nothing stops them from just...making their criticism/opinion without digital depictions. 
For example, I don't think there's some expressive meaning to making Biden say "I'm senile" that just stating your opinion yourself wouldn't convey (unlike images on a t-shirt, which *were* integral to the point that the student was trying to make in [*Guiles v. Marineau*](https://caselaw.findlaw.com/court/us-2nd-circuit/1101375.html) \[[shirt in question](https://www.thenation.com/wp-content/uploads/2016/02/Scott_Censorship_AP_crop_img-680x430.jpg)\]). To say that relinquishing your control over the direction of a work can be considered an artistic choice is probably a step too far in my mind. Nonetheless, I think reasonable minds could differ on this point, so feel free to let me know your take. ### Detour: Chill, bro Since the articles focus so intently on the Act's purported cabining of protected speech directly, they don't address the possibility of a **chilling effect** on speech. I actually think this point has more merit than the complaints about 1a defenses being limited, or an argument that the whole statute is impermissible because of its restrictions themselves. In fact, this point would be where the other arguments about the statute being vague/broad would have the most powerful effect. ## Conclusion diff --git a/content/Essays/on-linux.md b/content/Essays/on-linux.md index 18489665e..57095b8a3 100755 --- a/content/Essays/on-linux.md +++ b/content/Essays/on-linux.md @@ -7,7 +7,7 @@ tags: - seedling - essay date: 2023-08-23 -lastmod: 2024-03-17 +lastmod: 2024-08-31 --- > [!hint] This page documents my many adventures with Linux and why I enjoy it. > If you're looking to get involved with Linux, feel free to browse the [[Resources/learning-linux|resources for that purpose]] that I've compiled. @@ -56,6 +56,7 @@ First, the startup. Windows has quite a few non-privacy, non-furtive idiosyncras I started playing around with Arch on my 1TB expansion card when Fedora announced they were considering dropping X11 a few months ago. Interestingly, I ended up wanting to use Wayland with Arch anyway. This turned out to be a mistake. #### Digital Extremes and Wonky Macros (DEs/WMs) I first tried Hyprland with a random sensible config I found on YouTube, and once I stripped out Kitty for Alacritty I quite liked it. The only issue was that toolbars on things like Firefox and Dolphin take up way too much screen real estate. +- Kitty and Hyprland have maintainers that are not reflective of the community that uses Linux. Kitty’s is just opinionated, but for the latter I would follow the sentiment of the Freedesktop community (who banned the creator of the WM, leaving the project without any support from the Wayland side) and **avoid Hyprland.** Alternatives are [River](https://codeberg.org/river/river) and the [[Garden/Programs I Like/home#Suckless software|suckless]] project [dwm](https://dwm.suckless.org/). Then, I added gnome and the gnome apps, was fun to try the newest gnome and see how well integrated with Wayland it was. @@ -68,14 +69,19 @@ Once I started encountering dependency hell on Fedora, I backed up my files and I started on Plasma Wayland again. Here's the timeline: 1. Plasma Wayland has some odd quirks, so I research workarounds to make it behave more like GNOME. -2. Wayland has massive performance issues which I was unable to solve, so **Wayland is not yet usable for NVIDIA**. I swap to X11. +2. Wayland has massive performance issues which I was unable to solve, so **Wayland is not yet usable for NVIDIA** { *last attempt at NVIDIA Wayland: August 2024* }. I swap to X11. 3. 
X11 Plasma reveals some more usability issues with Plasma. It has a massively degraded experience when I'm using my laptop undocked for notes etc. I start using Wayland on the go and X11 at my desktop.
4. Swapping between X11 and Wayland on logout has instability issues, probably due to something in SDDM (because I'm still using Plasma). I realize that I'm only having to deal with these issues because I'm holding on to plasma.
5. I revert to X11 GNOME. All is right with the world, I only need the workarounds that make my eGPU work, and it's more familiar because I've already used it for almost a year.

But aside from that roundabout, I've been navigating Arch just fine. I went into it knowing how to negate the most complained pitfall of Arch: that upgrading on a bleeding distro will break your system. To avoid this, I use BTRFS, which I can take snapshots of at any time that I can roll back to using `snapper`. And to make the process easier, I use `snap-pac`, which will automatically take those snapshots when running a `pacman` operation. Finally, to access these when my system is unbootable, `grub-btrfs` allows me to boot into a snapshot directly from the bootloader instead of having to try to mount it from an external OS. I've not yet had breakage, but it's good to have when a problem arises!
- The only thing this doesn't really prevent is [grubpocalypse](https://bbs.archlinux.org/viewtopic.php?id=280246), but hopefully I don't ever run into a problem like that.
+
+Overall, Arch has been a great experience for me. ==ABOUT: how the AUR makes you a more careful package installer==
#### Other Fun Times
I really like my expansion card for installing toy OSes to. Having an installed OS that you can throw anything on without regard to breakage has been great for messing with whatever catches my fancy. This is actually where I experimented with (wip) [[Projects/vfio-pci|GPU passthrough to a Windows VM]].

-I've also been doing some Rust toolchain witchery on here but I'm not ready to write about it yet. \ No newline at end of file
+I've also been doing some Rust toolchain witchery on here but I'm not ready to write about it yet.
+
+### What's Next?
+I'm tracking [SerpentOS](https://serpentos.com/). It looks to be a ground-up attempt at incorporating advantages of unbreakable/immutable distros into the typical rolling-release model. This would take away the need for workarounds on Arch like my snapshot setup, because that capability is baked into their package manager `moss` as performant 'transactions'. It's from the creator of Solus, so it's guaranteed to be good. Once they have maintainers and a 1.0 release, I'll try it out!
\ No newline at end of file
diff --git a/content/Programs I Like/rust-macros.md b/content/Programs I Like/rust-macros.md
index 1393f3749..29b4ee4d2 100755
--- a/content/Programs I Like/rust-macros.md
+++ b/content/Programs I Like/rust-macros.md
@@ -5,7 +5,7 @@ tags:
  - misc
  - seedling
date: 2024-02-28
-lastmod: 2024-02-28
+lastmod: 2024-08-31
---
Rust's [[Programs I Like/functional-programming|functional patterns]] are great, but sometimes you need to get weird. What if you want to construct a struct type, but you (the programmer) don't know what types the fields will be while you're writing this? Rust has you covered in situations just like this one.
@@ -31,16 +31,16 @@ This macro creates a function at compile time as a member of the struct in `inpu
There's also a way to iterate `Vec<>` inside macros with the `*` repetition operator. 
This operator has two parts, a body and a separator, but I couldn't find a satisfactory tutorial online. Here's my attempt:
```rust
quote!{
-    #(let #some_vec = 5);*
+    #(let #some_vec = 5;)*
}
```
Here, everything inside the `#()` parenthetical will be repeatedly generated for each element of `some_vec`, with `#some_vec` expanding to the element at the current index. Presumably it contains the `Ident` s of some variable names of type `i32` that we want to declare and assign 5 to all of them in our macro. An expansion might look like:

```rust
-let x = 5;let y = 5;let z = 5;
+let x = 5; let y = 5; let z = 5;
```

-It's okay that it's not pretty because the compiler will see it as valid anyway.
+Note that the semicolon is now part of the body, so it's emitted for every element—including the last. A separator, by contrast, only goes *between* elements, which is why the earlier `);*` form left the final `let` unterminated. (Whitespace isn't significant in token streams, so the spacing of the expansion is just how the tokens happen to be printed.)

## \#\[proc_macro_derive()\]
[Rust traits](https://doc.rust-lang.org/book/ch10-02-traits.html) are powerful inheritance-like features that let the compiler know it can expect the "deriving" types to behave in the same way. What if you could generate trait implementations with a macro on the deriving type?
diff --git a/content/Projects/latex.md b/content/Projects/latex.md
new file mode 100644
index 000000000..8d07d4b90
--- /dev/null
+++ b/content/Projects/latex.md
@@ -0,0 +1,83 @@
+---
+title: A Working Latex Install that Doesn't Suck
+tags:
+  - linux
+  - difficulty-easy
+  - programming
+  - writing
+date: 2024-08-15
+lastmod: 2024-08-15
+draft: false
+---
+$\LaTeX$ is a programming language and environment well-suited to academic writing (and it can be embedded in Quartz, too!). There are many different ways to use it in your work. The absolute easiest will be [Overleaf](https://overleaf.com), an online collaborative editor with many document templates and all common packages available. But for more specific use cases, or offline work, a latex "distribution" can be installed on your computer for you to manage your own packages.
+
+Now, system packages are seamless install-wise, but for latex distros specifically, they can be hard to navigate, especially on a distro like Arch which has rearranged its latex packages. Below is my guide for a no-frills, opinionated, "I want to use LaTeX" installation, complete with explanations for all the choices along the way.
+
+## TLDR
+
+```sh
+$ sudo pacman --needed -S texlive-basic texlive-bin texlive-binextra texlive-bibtexextra texlive-latex texlive-latexrecommended okular neovim
+$ nvim afile.tex
+$ pdflatex afile.tex
+```
+
+## SETUP
+
+```bash
+$ sudo pacman -Qsq ".*texlive.*|.*latex.*" # List all installed packages matching the regex
+texlive-basic
+texlive-bibtexextra
+texlive-bin
+texlive-binextra
+texlive-latex
+texlive-latexrecommended
+$
+```
+
+Installing all of the above will allow you to create `.tex` files and compile them to various outputs (PDF, HTML) with `latex file.tex` or `pdflatex file.tex`.
+
+Install `texstudio` for a seamlessly integrated editor. Otherwise, read on for an [[Programs I Like/code-editors#Neovim|nvim]] configuration recipe.
+
+If you're like me and like to modify where a program stores things to comport with the XDG basedir spec, please review the [table of environment variables](https://wiki.archlinux.org/title/TeX_Live#texmf_trees_and_Kpathsea) on the archwiki.
+## Editor
+[VimTeX](https://github.com/lervag/vimtex) is a great plugin supporting live view, inverse search, and more. 
Lazy config:
+
+```lua
+{
+  "lervag/vimtex",
+  lazy = false,
+  init = function()
+    vim.g.vimtex_view_method = "general"
+  end,
+},
+```
+
+It also requires global variables to be set up correctly for its features:
+
+```lua
+-- Your nvim distro may have a native way to set globals; this can also be done with Vimscript
+-- Modify both lines as needed for your chosen viewer
+vim.g.vimtex_view_general_viewer = "okular"
+vim.g.vimtex_view_general_options = "--unique file:@pdf#src:@line@tex" -- Goes in Okular -> Options -> Configure Okular -> Editor -> Custom -> command
+```
+
+## Explanation of packages
+First off, I use TeX Live over MiKTeX for security and feature reasons. I recommend `pdflatex` because of the [comparison by Overleaf](https://www.overleaf.com/learn/latex/Articles/The_TeX_family_tree%3A_LaTeX%2C_pdfTeX%2C_XeTeX%2C_LuaTeX_and_ConTeXt). And Okular is a fairly feature-packed but still performant PDF viewer/annotator, and a more common choice than VimTeX's other supported alternatives like Zathura and MuPDF.
+
+Certain packages house the basic commands used to configure your install or generate files. Use `sudo pacman -F $(which someprogram)` to learn which package installs a specific command. Those are:
+
+```
+texlive-basic
+texlive-bin
+texlive-binextra
+```
+
+The *TeX* packages needed to generate most files are housed in other *arch* packages. You can find which CTAN collection is in which arch package with `tlmgr info thepackagename | grep collection`. It'll return `collection-somename`, which means that you need to install `texlive-somename` with pacman. I needed to install all these to resolve warnings and errors:
+
+```
+texlive-bibtexextra
+texlive-latex
+texlive-latexrecommended
+```
+
+Hope that helps!
\ No newline at end of file
diff --git a/content/Projects/my-computer.md b/content/Projects/my-computer.md
index 92b036312..b4a0747f5 100755
--- a/content/Projects/my-computer.md
+++ b/content/Projects/my-computer.md
@@ -31,27 +31,32 @@ So yeah, it’s pretty cool. Here are my peripherals:
- GTX3060Ti
	- Rated max power is 220W, but one DA-2 isn't enough because they're too old
## OS/Configuration
-I run Fedora Linux with the GNOME desktop environment. I was a longtime Windows 10 user, and dabbled in Linux a bit, but eventually got fed up enough with Windows that I swapped for good.
+I was a longtime Windows 10 user, and dabbled in Linux a bit, but eventually got fed up enough with Windows that I swapped for good. Currently, I'm running Arch Linux and X11 GNOME, highly customized.

-Previously, I’ve also daily driven Linux Mint. You can read more about my history with Linux on [[Essays/on-linux|its dedicated page]].
+Previously, I’ve also daily driven Fedora and Linux Mint. You can read more about my history with Linux on [[Essays/on-linux|its dedicated page]].
### On User Interface
I’ve daily driven XFCE, Budgie, Unity, and KDE before. No DE really caught my eye in a way that feels both intuitive and productive until GNOME. The overview is such a neat concept that’s performant, useful for rapid task switching, and pretty. I recommend the Blur My Shell extension for best results, as well as an extension that gives you trackpad gestures for your windowing system.
-- Little tip for productivity: if you use gestures, throw each new window on a different workspace and swipe instead of alt-tabbing.
+
+> [!tip] Productivity:
+> If you use trackpad gestures, try putting each new window on a different workspace and use three-finger swipes instead of alt-tabbing.
+
### Config! 
I use a [bare git repository](https://www.atlassian.com/git/tutorials/dotfiles) to backup all my small configuration files that are scattered throughout my computer.
- Sidebar: I deviated from the tutorial and called my alias `dots` instead of `config`. It just felt better and there was no chance of confusion with Fedora's `configure` system utility.
#### Config Hell
-- There are a lot of little tweaks I do to software to make it fully useful to me, which is the one argument I’ve ever raised *against* compartmentalizing through Flatpak, Snap, etc.
+- There are a lot of little tweaks I do to software to make it fully useful to me, which is the one argument I’ve ever personally raised *against* compartmentalizing through Flatpak, Snap, etc.
	- I have a bunch of Flatpak programs with absolutely no settings sync or remotely near the capability to sync, so what do I do when I want to migrate?
- Hey kids wanna see a dead husk of a man? Come find me three hours after I update my [[code-editors#Neovim|Neovim]] install. Dear lord, that thing breaks OFTEN.
### Future?
Upgrades are inevitable with any piece of hardware. Now that my GPU is up to a 3060ti from a 1650s, I'm looking to upgrade my cpu. I'll follow through on that when linux figures out thread scheduling on newer cpu die layouts (p-core e-core is still rough at the moment).
## Software
-Any software that's actually of note can be found in [[Programs I Like/home|Programs I Like]]. Here, I'll just go over some tenets I've noticed when dealing with my computer as a tool for my work, my projects, and my personal life.
+Any specific software that I like using can be found in [[Programs I Like/home|Programs I Like]]. Here, I'll just go over some tenets I've noticed when dealing with my computer as a tool for my work, my projects, and my personal life.

I value low-[[Dict/resistance|resistance]], low-[[Dict/friction|friction]] software. It's what led me to pursue linux, Obsidian, and this website in general. If something is fast to use, I'll use it more often.
#### Immutable Distros
Something that's gaining popularity is the immutable operating system, where the underlying filesystem is intentionally resistant to change. I don't see this as overly resistant in my sense, mainly because providers like VanillaOS and Fedora Silverblue recognize that this resistance is present and provide alternative routes to install software. It's more of a compromise.
### Software future
-I'll probably hop to Arch next since Fedora is going to drop X11 soon. Tiling wms like River and Hyprland look interesting, and I've played around with them on my 1tb Arch expansion card to mixed results. If I get frustrated with tiling, there's always arch gnome... \ No newline at end of file
+Tiling WMs like River and scrolling WMs like Niri look interesting, and I've played around with them on my 1tb Arch expansion card to mixed results. Once NVIDIA Wayland support is MUCH more consistent and performant, I'll consider swapping and doing a pretty rice.
+
+Distro-wise, I'm tracking SerpentOS. 
\ No newline at end of file
diff --git a/content/Projects/rsgistry.md b/content/Projects/rsgistry.md
index 07d405430..04d316f0e 100755
--- a/content/Projects/rsgistry.md
+++ b/content/Projects/rsgistry.md
@@ -1,5 +1,5 @@
---
-title: r/[es]/gistry
+title: 🦀 rsgistry
tags:
  - foss
  - "#rust"
@@ -12,14 +12,14 @@ lastmod: 2024-02-28
---
[Repository](https://github.com/bfahrenfort/rsgistry)

-I have a vision that all should take to write a customized, full-stack, ready-to-deploy registry web app for your packages or community extensions is editing a single type. More info to come soon.
+I have a vision that all it should take to write a customized, full-stack, ready-to-deploy registry web app for your packages or community extensions is editing a single type. Stylized as r\/\[es\]\/gistry, the app is geared towards reducing the overhead required for your community to be able to share their work built for-use-with or on-top-of your project. More info to come soon.

This entry will be a technical overview of my implementation choices and program design. Documentation on actually using the codebase will be hosted in the repository. Enjoy!
-- Sidebar: this was advanced for me but it will be extremely easy for someone with limited coding knowledge to fork and deploy in a way that supports their use case.
+- Sidebar: this was advanced for me but it will (hopefully) be extremely easy for someone with limited coding knowledge to fork and deploy in a way that supports their use case.
## Background
I’ve run into the same ecosystem problem in about three different spaces now: there’s a really robust system for **community extensions, but no real way to share them**. Either they’re too trivial for individual GitHub repositories, too non-tech-oriented, or still need some additional metadata hosted online in order to have a good API consumer UX. Thus, I’m adapting a test project into a batteries-included codebase for hosting a registry. API is in Axum and set up to be hosted for free on Shuttle, unsure about the frontend as of yet but looking at Leptos.

-...The name stylization is just a regex joke.
+...The name stylization is just a regex joke, but it has the added benefit of teaching you how to pronounce it.
## Implementation Details
### Macros
I'm cultivating a tidbit on [[Programs I Like/rust-macros|Rust Macros]], so feel free to read for a practical introduction to the topic.
diff --git a/content/Updates/2024/aug.md b/content/Updates/2024/aug.md
new file mode 100644
index 000000000..a32dea3a7
--- /dev/null
+++ b/content/Updates/2024/aug.md
@@ -0,0 +1,21 @@
+---
+title: MM/YY - Summary of Changes
+draft: true
+tags:
+  - "#update"
+date: 2024-08-31
+lastmod: 2024-08-31
+---
+## Housekeeping
+I've now been maintaining this garden for a year! My productivity on it has not been what I've wanted it to be, but I can confidently say that working on this website does indeed help me with what I've outlined in [[Essays/why-i-garden|Why I Garden]].
+## Pages
+- New: [[Projects/latex|A Working Latex Install that Doesn't Suck]]
+- Content update: [[Essays/no-ai-fraud-act|No AI FRAUD Act]]. I'm considering breaking the essay into atomic pieces in the Resources folder so that it's more of a helpful reference than a comprehensive argumentative piece. Even though `Dict/` is normally where I put atomic files, that directory is for epistemologically neutral information which this essay is not. 
+- Content update: [[Essays/on-linux|The Linux Experience]] +- Content update: [[Projects/my-computer|My Computer]] +- Okay, for real this time, only two sections left on ai-infringement. +## Status Updates +- Toyed with the light mode color scheme to make it a bit more consistently readable across brightnesses. +- Updated the index again. +## Helpful Links +[[todo-list|Site To-Do List]] | [[Garden/index|Home]] diff --git a/content/curated.md b/content/curated.md index 0d5cef3d9..3efa9eb95 100755 --- a/content/curated.md +++ b/content/curated.md @@ -3,10 +3,12 @@ title: Reading List tags: - toc date: 2024-01-30 -lastmod: 2024-02-28 +lastmod: 2024-08-31 --- Here are some of the more interesting/mature works on my site organized by topic. ## Intro +- [[about-me|About Me]] +- [[bookmarks|What I'm Reading]] | [[todo-list|What I'm Writing]] - [[Essays/why-i-garden|Why I Garden]] - [[Projects/Obsidian/digital-garden#Using this Site|Using this site]] ## Legal diff --git a/content/index.md b/content/index.md index 9c6f832ed..695e19615 100755 --- a/content/index.md +++ b/content/index.md @@ -19,7 +19,7 @@ You're on a [[Dict/what-is-a-garden|Digital Garden]] dedicated to open-source us For a monthly list of what's new on the site, subscribe to the [Updates RSS feed](/Updates.xml). ## Important Links -[[about-me|About Me]] | [[curated|Recommended Reading]] | [[Misc/disclaimers|Disclaimers/Terms of Use]] | [[/Updates|Monthly Changelog]], [[todo-list|Up Next]] | Mastodon +[[curated|\(Optional\) Start here\!]] | [[Misc/disclaimers|Disclaimers/Terms of Use]] | [[/Updates|Monthly Changelog]], [[todo-list|Up Next]] | Mastodon

not legal advice 🤟 \ No newline at end of file diff --git a/quartz.config.ts b/quartz.config.ts index eebc8b029..faeef5b59 100644 --- a/quartz.config.ts +++ b/quartz.config.ts @@ -25,13 +25,13 @@ const config: QuartzConfig = { }, colors: { lightMode: { - light: "#EEEFFF", // Backg + light: "#F2F2F2", // Backg lightgray: "#bcc0cc", // Code Backg gray: "#8c8fa1", // Unread nodes and subtitles darkgray: "#45485f", // Text dark: "#4c4f69", // Code text - secondary: "#40a02b", // Links, title, and current node - tertiary: "#209fb5", // Visited nodes + secondary: "#325738", // Links, title, and current node + tertiary: "#0E8390", // Visited nodes highlight: "rgba(143, 159, 169, 0.15)", }, darkMode: { @@ -66,6 +66,7 @@ const config: QuartzConfig = { Plugin.CrawlLinks({ markdownLinkResolution: "shortest" }), Plugin.Description(), Plugin.Remark42({ host: "https://be-far.com/comments", site_id: "remark", theme: "dark", no_footer: true }), + Plugin.Latex({renderEngine: "katex"}) ], filters: [Plugin.RemoveDrafts()], emitters: [ diff --git a/quartz/styles/custom.scss b/quartz/styles/custom.scss index 58a7aa5e1..83927a389 100644 --- a/quartz/styles/custom.scss +++ b/quartz/styles/custom.scss @@ -91,6 +91,11 @@ blockquote > p { text-indent: 0px; } +#search-icon > p { + text-indent: 0px; + margin-bottom: 2px; +} + footer > p { margin-top: 2px; margin-bottom: 16px;