diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 5ecd92532..000000000
--- a/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-_drafts/
-_site/
-.DS_Store
-*.swp
-Gemfile.lock
-.sass-cache/
-.ruby-version
\ No newline at end of file
diff --git a/Gemfile b/Gemfile
deleted file mode 100644
index c1033afa7..000000000
--- a/Gemfile
+++ /dev/null
@@ -1,33 +0,0 @@
-source "https://rubygems.org"
-
-# Hello! This is where you manage which Jekyll version is used to run.
-# When you want to use a different version, change it below, save the
-# file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
-#
-# bundle exec jekyll serve
-#
-# This will help ensure the proper Jekyll version is running.
-# Happy Jekylling!
-gem "jekyll", "~> 3.8.3"
-
-# This is the default theme for new Jekyll sites. You may change this to anything you like.
-gem "minima", "~> 2.0"
-
-# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
-# uncomment the line below. To upgrade, run `bundle update github-pages`.
-# gem "github-pages", group: :jekyll_plugins
-
-# If you have any plugins, put them here!
-group :jekyll_plugins do
- gem "jekyll-feed", "~> 0.6"
- gem 'jekyll-sitemap'
- gem 'jekyll-email-protect'
- gem 'jekyll-seo-tag'
- gem 'jekyll-paginate'
-end
-
-# Windows does not include zoneinfo files, so bundle the tzinfo-data gem
-gem "tzinfo-data", platforms: [:mingw, :mswin, :x64_mingw, :jruby]
-
-# Performance-booster for watching directories on Windows
-gem "wdm", "~> 0.1.0" if Gem.win_platform?
diff --git a/_config.yml b/_config.yml
deleted file mode 100644
index 680cc6efb..000000000
--- a/_config.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-# Jekyll configuration
-
-title: The C++ Alliance
-email: info@cppalliance.org
-url: http://cppalliance.org # the base hostname & protocol for your site, e.g. http://example.com
-# baseurl: /subpath # the subpath of your site, e.g. /blog
-description: >- # this means to ignore newlines until "baseurl:"
- The C++ Alliance is dedicated to helping the C++ programming language
- evolve. We see it developing as an ecosystem of open source libraries
- and as a growing community of those who contribute to those libraries..
-excerpt_separator: ""
-github_username: CPPAlliance
-twitter:
- username: CPPAlliance
- card: summary
- site: '@CPPAlliance'
-
-# Build settings
-markdown: kramdown
-
-# GitHub Flavored Markdown (GFM) settings
-kramdown:
- input: GFM
- auto_ids: true
- hard_wrap: false
- syntax_highlighter: none
- # syntax_highlighter: rouge
-
-theme: minima
-plugins:
- - jekyll-feed
- - jekyll-sitemap
- - jekyll-email-protect
- - jekyll-seo-tag
- - jekyll-paginate
-
-feed:
- posts_limit: 15
-
-# Pagination settings
-paginate: 5
-paginate_path: "/news/page:num/"
-
-# Twitter Card Setting
-title_image: "https://cppalliance.org/images/logo.png"
-
-# Exclude from processing.
-# The following items will not be processed, by default. Create a custom list
-# to override the default setting.
-# exclude:
-# - Gemfile
-# - Gemfile.lock
-# - node_modules
-# - vendor/bundle/
-# - vendor/cache/
-# - vendor/gems/
-# - vendor/ruby/
diff --git a/_includes/contact.html b/_includes/contact.html
deleted file mode 100644
index 0ae49db1b..000000000
--- a/_includes/contact.html
+++ /dev/null
@@ -1,40 +0,0 @@
-
-
-
diff --git a/_posts/2017-07-07-Beast-is-accepted-into-Boost.md b/_posts/2017-07-07-Beast-is-accepted-into-Boost.md
deleted file mode 100644
index 5cac15c05..000000000
--- a/_posts/2017-07-07-Beast-is-accepted-into-Boost.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: vinnie
-author-id: vinnie
----
-Beast, an HTTP and
-WebSocket protocol library written in C++11, becomes part of the
-Boost library collection.
diff --git a/_posts/2017-08-17-The-C++-Alliance-incorporates-In-California.md b/_posts/2017-08-17-The-C++-Alliance-incorporates-In-California.md
deleted file mode 100644
index 91168d488..000000000
--- a/_posts/2017-08-17-The-C++-Alliance-incorporates-In-California.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, vinnie
----
-The C++ Alliance, Inc. officially incorporates as a California
-501(c)(3) non-profit organization. The company is administered
-entirely as a virtual entity with no physical office.
diff --git a/_posts/2017-10-01-Louis-Tatta-joins-as-CEO.md b/_posts/2017-10-01-Louis-Tatta-joins-as-CEO.md
deleted file mode 100644
index 3f2ad1e6b..000000000
--- a/_posts/2017-10-01-Louis-Tatta-joins-as-CEO.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-title: "Louis Tatta joins as CEO"
----
-Louis Tatta joins the Alliance in the role of Chief Executive Officer,
-to oversee and administer the day to day operations of the company and
-carry out the mission.
diff --git a/_posts/2017-12-05-Foundation-Group-is-engaged-for-registration-service.md b/_posts/2017-12-05-Foundation-Group-is-engaged-for-registration-service.md
deleted file mode 100644
index 693f8ffb5..000000000
--- a/_posts/2017-12-05-Foundation-Group-is-engaged-for-registration-service.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
----
-The Alliance engages
-Foundation Group,
-a non-profit formation and compliance services company. Foundation
-Group delivers a comprehensive 501(c)(3) registration service with
-a 100% IRS approval rate.
diff --git a/_posts/2017-12-10-Incorp-engaged-as-registered-agent.md b/_posts/2017-12-10-Incorp-engaged-as-registered-agent.md
deleted file mode 100644
index a6048235f..000000000
--- a/_posts/2017-12-10-Incorp-engaged-as-registered-agent.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
----
-The Alliance engages
-InCorp Services, Inc.
-as the registered agent. InCorp provides National Registered
-Agent services in all 50 states and Washington, D.C.
diff --git a/_posts/2018-01-05-Administrate-Cpplang-slack-workspace.md b/_posts/2018-01-05-Administrate-Cpplang-slack-workspace.md
deleted file mode 100644
index c5fc0d5f8..000000000
--- a/_posts/2018-01-05-Administrate-Cpplang-slack-workspace.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
----
-
-The Alliance is now the owner and administrator of the
-Cpplang Slack Workspace. This
-workspace is the premiere and most popular community of C++ enthusiasts
-and professionals from around the globe.
-
diff --git a/_posts/2018-01-09-Technical-committee-established.md b/_posts/2018-01-09-Technical-committee-established.md
deleted file mode 100644
index 6a995b0ff..000000000
--- a/_posts/2018-01-09-Technical-committee-established.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
----
-The board of directors establishes the Technical Committee, whose job
-is to inform the CEO and board on all technical matters such as code
-review, resume review, the quality of papers, and other ongoing work.
diff --git a/_posts/2018-01-10-Glen-Fernandes-Joins-The-Technical-Committee.md b/_posts/2018-01-10-Glen-Fernandes-Joins-The-Technical-Committee.md
deleted file mode 100644
index 48f18b559..000000000
--- a/_posts/2018-01-10-Glen-Fernandes-Joins-The-Technical-Committee.md
+++ /dev/null
@@ -1,7 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, glen
----
-Glen Joseph Fernandes, a Boost C++ library author, contributor, maintainer,
-joins as a technical advisor.
diff --git a/_posts/2018-01-16-Jon-Kalb-joins-the-board.md b/_posts/2018-01-16-Jon-Kalb-joins-the-board.md
deleted file mode 100644
index d3cf6a7ab..000000000
--- a/_posts/2018-01-16-Jon-Kalb-joins-the-board.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, jon
----
-Jon Kalb joins the Alliance board of directors as treasurer.
diff --git "a/_posts/2018-01-16-Ren\303\251-Rivera-joins-the-board.md" "b/_posts/2018-01-16-Ren\303\251-Rivera-joins-the-board.md"
deleted file mode 100644
index 6e3173c8a..000000000
--- "a/_posts/2018-01-16-Ren\303\251-Rivera-joins-the-board.md"
+++ /dev/null
@@ -1,6 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, rene
----
-René Rivera joins the Alliance board of directors as secretary.
diff --git a/_posts/2018-01-16-Vinnie-Falco-joins-the-board.md b/_posts/2018-01-16-Vinnie-Falco-joins-the-board.md
deleted file mode 100644
index fb0b6d51b..000000000
--- a/_posts/2018-01-16-Vinnie-Falco-joins-the-board.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, vinnie
----
-Vinnie Falco joins the Alliance board of directors as president.
diff --git a/_posts/2018-02-22-Corporate-logo-is-adopted.md b/_posts/2018-02-22-Corporate-logo-is-adopted.md
deleted file mode 100644
index bbb9b7c34..000000000
--- a/_posts/2018-02-22-Corporate-logo-is-adopted.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
-title: Corporate Logo Is Adopted
-hero-image: 2018-02-22-Corporate-logo-is-adopted.png
----
-
-A new corporate logo is adopted from the conclusion of a contest on
-Designhill:
-
-
-
-
diff --git a/_posts/2018-05-06-Gold-sponsor-of-C++-Now.md b/_posts/2018-05-06-Gold-sponsor-of-C++-Now.md
deleted file mode 100644
index 0ee82add4..000000000
--- a/_posts/2018-05-06-Gold-sponsor-of-C++-Now.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
-title: Gold sponsor of C++Now
----
-The Alliance is a Gold sponsor for
-C++Now 2018. This
-conference is a gathering of C++ experts and enthusiasts from around
-the world in beautiful Aspen, Colorado.
diff --git a/_posts/2018-05-30-Member-of-the-international-committee-for-information-technology-standards.md b/_posts/2018-05-30-Member-of-the-international-committee-for-information-technology-standards.md
deleted file mode 100644
index e1e68c03a..000000000
--- a/_posts/2018-05-30-Member-of-the-international-committee-for-information-technology-standards.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
----
-The Alliance is member of the
-International Committee for Information Technology Standards.
-INCITS is the central U.S. forum dedicated to creating technology standards
-for the next generation of innovation. INCITS members combine their expertise
-to create the building blocks for globally transformative technologies. From
-cloud computing to communications, from transportation to health care
-technologies, INCITS is the place where innovation begins. Membership in
-INCITS allows voting in official WG21 meetings.
diff --git a/_posts/2018-08-06-The-Law-Firm-for-Non-Profits-engaged.md b/_posts/2018-08-06-The-Law-Firm-for-Non-Profits-engaged.md
deleted file mode 100644
index 5a74dd9b6..000000000
--- a/_posts/2018-08-06-The-Law-Firm-for-Non-Profits-engaged.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
-title: The Law Firm for Non-Profits engaged
----
-The Alliance engages
-The Law Firm for Non-Profits
-for legal representation and services. They are passionate about
-supporting, advocating for and partnering with non-profits and the
-people behind them. For more than three decades, those looking for
-assistance with non-profit law throughout the United States and
-around the world have relied on the attorneys of The Law Firm
-for Non-Profits for superior legal and business guidance.
diff --git a/_posts/2018-08-13-Marshall-Clow-joins-as-staff-engineer.md b/_posts/2018-08-13-Marshall-Clow-joins-as-staff-engineer.md
deleted file mode 100644
index 70459f85b..000000000
--- a/_posts/2018-08-13-Marshall-Clow-joins-as-staff-engineer.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, marshall
-title: Marshall Clow joins as a Staff Engineer
-author-id: marshall
----
-
-Marshall Clow joins the Alliance as a Staff Engineer. Previously,
-he worked at Qualcomm for many years. Most of his time is spent
-working on
-libc++,
-the C++ standard library implementation for LLVM. He is also a member
-of the
-C++ standards committee,
-currently serving as the chair of LWG, the library working group.
-Marshall has been contributing to the
-Boost libraries
-since 2001, and is the author of the
-Boost.Algorithm
-library. Furthermore he maintains several other boost libraries,
-and moderates some of the boost mailing lists. Finally, Marshall
-has graciously taken on the role of release manager for several
-Boost versions.
diff --git a/_posts/2018-09-03-Damian-Jarek-joins-as-staff-engineer.md b/_posts/2018-09-03-Damian-Jarek-joins-as-staff-engineer.md
deleted file mode 100644
index 97c117940..000000000
--- a/_posts/2018-09-03-Damian-Jarek-joins-as-staff-engineer.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, damian
----
-Damian Jarek joins the Alliance as Staff Engineer. Previously he worked on a
-number of embedded networking projects for a few major clients. As a Staff
-Engineer he’ll be working on an open-source companion library for Boost.Beast
-and Boost.Asio, which will abstract away the platform-specific details of
-acessing system proxy settings and performing TLS verification of a peer
-certificate chain using the operating system’s key store.
diff --git a/_posts/2018-09-23-Gold-sponsor-of-Cppcon-2018.md b/_posts/2018-09-23-Gold-sponsor-of-Cppcon-2018.md
deleted file mode 100644
index ace275c09..000000000
--- a/_posts/2018-09-23-Gold-sponsor-of-Cppcon-2018.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
----
-
-The Alliance is a Gold sponsor for
-CppCon 2018. This
-conference is the annual, week-long face-to-face gathering for the
-entire C++ community. The conference is organized by the C++ community
-for the community. Attendees enjoy inspirational talks and a friendly
-atmosphere designed to help individuals learn from each other, meet
-interesting people, and generally have a stimulating experience.
-
diff --git a/_posts/2018-10-24-Initial-work-on-Certify-complete.md b/_posts/2018-10-24-Initial-work-on-Certify-complete.md
deleted file mode 100644
index e78ed48d8..000000000
--- a/_posts/2018-10-24-Initial-work-on-Certify-complete.md
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, damian
-author-id: damian
----
-# Initial work on `Certify` complete
-It's been mentioned in my initial blog post that I'd be working on a TLS
-certificate store abstraction library, with the intent of submitting it for
-formal review for Boost, at some point in the (hopefully near) future.
-The initial setup phase (things that every Software Engineer hates) is more
-or less complete. CI setup was a bit tricky - getting OpenSSL to run with
-the boost build system on both Windows and Linux (and in the future MacOS)
-has provided a lot of "fun" thanks to the inherent weirdness of OpenSSL.
-
-The test harness currently consists of two test runners that loads certificates
-from a database (big name for a folder structure stored in git) that has the
-certificate chains divided into two groups. Chains that will fail due to various
-reasons (e.g. self-signed certificates, wrong domain name) and ones that will pass
-(when using a valid certificate store). I'm still working on checking whether
-the failure was for the expected reason. All the verification is done offline
-(i.e. no communication with external servers is performed, only chain verification).
-
-At this point it looks like I should consider, whether the current design of
-the verification code is a good approach. Using the verification callback
-from OpenSSL and asio::ssl is quite an easy way of integrating the platform-specific
-certificate store API it causes issues with error propagation (transporting a platform-specific
-error through OpenSSL) and may be fairly slow, because it requires certificates to be
-reencdoded into the DER format so that they can be fed into the platform-specific API.
-An alternative to this approach would be load the entire root certificate store, along with CRLs and
-OCSP configuration into an OpenSSL context. This is potentially a little bit harder to get right but
-may offer better performance (no reencoding required when veryfing certificate chains) and eliminates
-the issues related to error handling. Further investigation, as to which approach is better, is required.
-
-Don't forget to star the repository: https://github.com/djarek/certify!
diff --git a/_posts/2018-11-13-WG21-San-Diego-Trip-Report.md b/_posts/2018-11-13-WG21-San-Diego-Trip-Report.md
deleted file mode 100644
index a94444b14..000000000
--- a/_posts/2018-11-13-WG21-San-Diego-Trip-Report.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: standards, marshall
-author-id: marshall
----
-# WG21 San Diego Meeting
-
-Last week was the fall 2018 WG21 standard committee meeting. It was held
-in San Diego, which is my hometown. The fact that I helped organize it
-(while I was working at Qualcomm) had absolutely no affect on the location,
-I assure you. ;-)
-
-This was the largest WG21 meeting ever, with 180 attendees. The last meeting
-(in Rapperswil, Switzerland) had about 150 attendees, and *that* was the
-largest one until now. There were more than 270 papers in the pre-meeting
-mailing; meaning that people were spending weeks reading papers to prepare
-for the meeting. Herb Sutter (the convener) has been telling everyone that
-new papers received after the San Diego meeting were out of scope for C++20,
-and apparently people took him at his word.
-
-This was my first meeting representing the C++ Alliance (though hardly my
-first overall). The Alliance was well represented, with Rene, Glen, Vinnie,
-Jon and myself attending. For information about how WG21 is structured,
-please see [isocpp.org](https://isocpp.org/std).
-
-I spent all of my time in LWG, since that's the group that I chair, and the
-one that has the most influence over libc++, the library that I work on.
-
-The big news from a library POV was that we voted to merge an updated paper
-based on the Ranges TS into the draft standard; which means that (barring
-catastrophe) that it will be part of C++20. This was a huge paper, weighing
-in at 220+ pages. We spent several days in LWG reviewing this (and a bunch
-of time at previous meetings as well).
-
-We also moved a bunch (around 25) of smaller papers; too many to list here.
-
-Detailed trip reports can be found around the web:
-
-* [Herb Sutter](https://herbsutter.com/2018/11/13/trip-report-fall-iso-c-standards-meeting-san-diego/)
-* [Reddit](https://www.reddit.com/r/cpp/comments/9vwvbz/2018_san_diego_iso_c_committee_trip_report_ranges/)
-
-The next WG21 meeting is in Kona, HI February 18-23rd.
diff --git a/_posts/2019-01-14-MarshallsJanuaryUpdate.md b/_posts/2019-01-14-MarshallsJanuaryUpdate.md
deleted file mode 100644
index d2224b295..000000000
--- a/_posts/2019-01-14-MarshallsJanuaryUpdate.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: marshall
-title: Marshall's January Update
-author-id: marshall
----
-
-Monthly update (or, what Marshall did in December)
-
-There are three main areas where I spend my time.
-
-* Boost
-* Libc++
-* WG21, where I am the chair of the Library Working Group (LWG)
-
-----
-Boost:
-December was a big month for boost, and much of the first part of the month was taken up with the release process. I was the release manager for the 1.69.0 release, which went live on 12-December. The final release process was fairly straighforward, with only one release candidate being made/tested - as opposed to the beta, which took _three_. In any case, we had a successful release, and the I (and other boost developers) are now happily working on features/bug fixes for the 1.70 release - which will occur in March.
-
-----
-Libc++:
-After the WG21 meeting in November, there was a bunch of new functionality to be added to libc++. The list of new features (and their status) can be seen [on the libc++ website](https://libcxx.llvm.org/cxx2a_status.html). My major contributions of new features in December were [Consistent Container Erasure](https://wg21.link/P1209R0), [char8_t: A type for UTF-8 characters and strings](https://wg21.link/P0482), and [Should Span be Regular?](https://wg21.link/P1085R2), and a big chunk of [Extending to Calendars and Time Zones](https://wg21.link/P0355R7).
-
-This is all pointing towards the January 16th "branch for release", and for the scheduled March release of LLVM 8.0.
-
-As the "code owner" for libc++, I also have to review the contributions of other people to libc++, and evaluate and fix bugs that are reported. That's a never ending task; there are new contributions ever day.
-
-----
-WG21
-
-Being between meetings (November -> February) there was not any special WG21 work to be done in December. There's an ongoing stream of bug reports, discussion, paper reviews that get done between meetings, and there is a series of papers that I need to finish for the pre-Meeting mailing deadline on 21-January. I have 1 1/2 done, and need to do 3-4 more.
diff --git a/_posts/2019-03-01-Adler-Colvin-engaged.md b/_posts/2019-03-01-Adler-Colvin-engaged.md
deleted file mode 100644
index 555322a19..000000000
--- a/_posts/2019-03-01-Adler-Colvin-engaged.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
-title: Adler & Colvin engaged
----
-The Alliance engages
-Adler & Colvin
-to complete IRS Form 1023, Application for Recognition of Exemption Under Section 501(c)(3) of the Internal Revenue Code. Completing this form can be a daunting task because of the legal and tax technicalities you'll need to understand. Adler & Colvin is a group of seasoned attorneys based in San Francisco, deeply committed to serving the legal needs of the nonprofit sector. The firm brings an unrivaled depth of expertise and passion to its representation of tax-exempt organizations and individual philanthropists.
-
diff --git a/_posts/2019-03-04-MarshallsMarchUpdate.md b/_posts/2019-03-04-MarshallsMarchUpdate.md
deleted file mode 100644
index 47e55d70a..000000000
--- a/_posts/2019-03-04-MarshallsMarchUpdate.md
+++ /dev/null
@@ -1,124 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: marshall
-title: Marshall's March Update
-author-id: marshall
----
-
-Monthly update (or, what Marshall did in January and February)
-
-There are four main areas where I spend my time.
-
-* Libc++, where I am the "code owner"
-* WG21, where I am the chair of the Library Working Group (LWG)
-* Boost
-* Speaking at conferences
-
-# Libc++
-
-The LLVM "branch for release" occurred in January, and there was a bit of a rush to get things into the LLVM 8 release. Now that is over, and we're just watching the test results, seeing if anyone finds any problems with the release. I don't anticipate any, but you never know.
-
-As the "code owner" for libc++, I also have to review the contributions of other people to libc++, and evaluate and fix bugs that are reported. That's a never-ending task; there are new contributions ever day.
-
-After the branch, I started working on new features for the LLVM 9 release (for this summer). More calendaring stuff, new C++20 features, and some C++17 features that haven't been done yet.
-
-### LWG papers implemented in Jan/Feb
-
-* P0355: Extending to Calendars and Time Zones. You may remember this from last month's update; this is a huge paper, and I am landing it in stages.
-* P1024: tuple-like interface to span
-* P1227: Signed ssize() functions
-* P1357: Traits for [Un]bounded Arrays
-
-### LWG issues implemented in Jan/Feb (certainly incomplete)
-
-* LWG3101: span's Container constructors need another constraint
-* LWG3144: span does not have a const_pointer typedef
-* Enabled a `memcpy` optimization for const vectors that was surprisingly missing
-
-### LLVM bugs resolved in Jan/Feb (probably incomplete)
-
-* [Bug 28412](https://llvm.org/PR28412) `std::vector` incorrectly requires CopyConstructible, Destructible and other concepts
-* [Bug 39183](https://llvm.org/PR39183) tuple comparison operators return true for tuples of different sizes
-* [Bug 24411](https://llvm.org/PR24411) libFuzzer outputs that crash libc++'s regex engine
-* [Bug 34330](https://llvm.org/PR34330) error: use of undeclared identifier 'isascii' while compiling strstream.cpp
-* [Bug 38606](https://llvm.org/PR38606) no_sanitize("unsigned-integer-overflow") annotation for decremented size_type in `__hash_table`
-* [Bug 40533](https://llvm.org/PR40533) `std::minmax_element` is 3 times slower than hand written loop
-* [Bug 18584](https://llvm.org/PR18584) SD-6 Feature Test Recommendations
-* [Bug 40566](https://llvm.org/PR40566) Libc++ is not Implicit Integer Truncation Sanitizer clean
-* [Bug 21715](https://llvm.org/PR21715) 128-bit integers printing not supported in stl implementation
-* [Bug 38844](https://llvm.org/PR38844) `__cpp_lib_make_unique` not defined in <memory>
-* [Bug 40495](https://llvm.org/PR40495) `is_invokable_v` does not compile
-* [Bug 40270](https://llvm.org/PR40270) `std::basic_stringstream` is not working with `std::byte`
-* [Bug 39871](https://llvm.org/PR39871) `std::tuple_size` should be a struct
-* [Bug 38052](https://llvm.org/PR38052) `std::fstream` still good after closing and updating content
-
-Also, there was a series of general cleanups in the libc++ tests to improve portability.
-
-
-The current status of libc++ can be found here:
-* [C++20 status](https://libcxx.llvm.org/cxx2a_status.html)
-* [C++17 status](https://libcxx.llvm.org/cxx1z_status.html)
-* [C++14 status](https://libcxx.llvm.org/cxx1y_status.html) (Complete)
-* [Libc++ open bugs](https://bugs.llvm.org/buglist.cgi?bug_status=__open__&product=libc%2B%2B)
-
-
-
-# WG21
-
-The "winter" WG21 meeting was held in Kona, HI on February 18-24. This was the last meeting for new features for C++20, and as such, it was both contentious and very busy.
-
-The Modules TS and the Coroutines TS were both adopted for C++20, along with a slew of language features.
-
-Here are some trip reports:
-* [Herb Sutter](https://herbsutter.com/2019/02/23/trip-report-winter-iso-c-standards-meeting-kona/)
-* [Bryce Adelstein Lelbach](https://www.reddit.com/r/cpp/comments/au0c4x/201902_kona_iso_c_committee_trip_report_c20/)
-* [Guy Davidson](https://hatcat.com/?p=69)
-
-
-My part in this was (as always) to chair the Library Working Group (LWG), the group responsible for the description of the library features in the standard (~1000 pages).
-We adopted several new features for C++20:
-
-* P0339R6 polymorphic_allocator<> as a vocabulary type
-* P0340R3 Making std::underlying\_type SFINAE-friendly
-* P0738R2 I Stream, You Stream, We All Stream for istream_iterator
-* P0811R3 Well-behaved interpolation for numbers and pointers
-* P0920R2 Precalculated hash values in lookup
-* P1001R2 Target Vectorization Policies from Parallelism V2 TS to C++20
-* P1024R3 Usability Enhancements for std::span
-* P1164R1 Make create_directory() Intuitive
-* P1227R2 Signed ssize() functions, unsigned size() functions
-* P1252R2 Ranges Design Cleanup
-* P1357R1 Traits for [Un]bounded Arrays
-
-I wrote five substantive papers for the Kona meeting, all were adopted. Five of them were very similar, all about improving the wording in the standard, rather than proposing new features.
-
-* [P1458](https://wg21.link/P1458) Mandating the Standard Library: Clause 16 - Language support library
-* [P1459](https://wg21.link/P1459) Mandating the Standard Library: Clause 18 - Diagnostics library
-* [P1462](https://wg21.link/P1462) Mandating the Standard Library: Clause 20 - Strings library
-* [P1463](https://wg21.link/P1463) Mandating the Standard Library: Clause 21 - Containers library
-* [P1464](https://wg21.link/P1464) Mandating the Standard Library: Clause 22 - Iterators library
-
-I was also the nominal author of [P1457](https://wg21.link/P1457) "C++ Standard Library Issues to be moved in Kona", but that was just a list of issues whose resolutions we adopted.
-
-Between now and the next meeting (July), LWG will be working on reviewing papers and issues to be adopted in July. I'm planning regular teleconferences (in fact, we had the first one on 1-March).
-
-The goal of the July meeting is to have a "Committee Draft" (CD) of the proposed C++20 standard that can be sent out for review.
-
-
-# Boost
-
-It's been a quiet couple of months for Boost, since we're between releases, and I have been busy with libc++ and WG21 activities. There have been a few bugs to chase down, and the dealing with change requests for the libraries whose maintainers have "moved on" takes some time.
-
-However, it's time for another Boost release (1.70), and I will be acting as the release manager again. The release calendar is available (as always) on [the Boost website](https://www.boost.org/development).
-
-The beta release is schedule for March 13th, and the final release for 10-April.
-
-# Conferences
-
-I had submitted talk proposals to three conferences, and all three were accepted. Hence, I will be speaking at:
-
-* [LLVM European Developer's Conference](https://llvm.org/devmtg/2019-04), April 8-9 in Brussels
-* [ACCU](https://conference.accu.org), April 10-13 in Bristol
-* [CppNow](http://www.cppnow.org), May 5-10 in Aspen, CO
-
diff --git a/_posts/2019-03-16-Certify-X509-validation.md b/_posts/2019-03-16-Certify-X509-validation.md
deleted file mode 100644
index 0e62980d4..000000000
--- a/_posts/2019-03-16-Certify-X509-validation.md
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, damian
-author-id: damian
----
-# Certify - X509 certificate validation
-I always knew that validating a certificate chain presented by a peer is not an
-easy procedure, but my recent work in Certify to port over the procedure from
-Chromium has only proven that I underestimated the complexity of it. Certificate
-revocation seems to be a particularly hard issue, with 2 main categories of
-solutions - offline and online validation.
-
-## Online validation - OCSP
-OCSP is a protocol designed to allow checking the revocation status of a
-certificate by sending a request over a subset of HTTP/1.1. At first glance, it
-seems it solves the status checking problem on its own. However, OCSP has
-problems, inherent to online checking.
-
-First of all, the validation server might not be currently available - so a lack
-of response is most definitely not a state in which a chain can be trusted.
-Secondly, the check may be slow, after all, it requires connecting to a separate
-service. Additionally, the native Windows API for certificate verification does
-the status check synchronously, which means potentially blocking a user's thread
-that typically services asynchronous operations. There is a feature that
-alleviates most of these issues, at least from the point of view of a TLS
-client, OCSP stapling. Sadly, it's not very widespread and actually few large
-services support it, due to the fact that it increases bandwidth requirements.
-Certify will, at some point support both OCSP status checks on the client side
-and support for OCSP stapling. The problem here is that OCSP requires a fairly
-functional HTTP client and ASN.1 parsing. A lot of this functionality is already
-present in OpenSSL, however, integrating it with ASIO and Beast may be tricky.
-
-
-## Offline validation - CRLs and Google CRLSets
-The traditional method of checking the status of a certificate involves looking
-up revocation lists installed in the OS's store, or downloaded by the
-application from the CA. Unfortunately CRLs have issues - an example would be an
-incident from a few years ago when CloudFlare performed a mass revocation which
-blew up the size of the CRLs by a few orders of magnitude, resulting in a
-requirement to download multiple megabytes of data, turning CAs into a major
-performance bottleneck. Google came up with a different mechanism, called
-CRLSets, which involves a periodic download of a revocation list which is
-created by Google's crawler querying certificate status over OCSP. This
-verification method is fairly attractive for applications that run on systems
-that already have Google products, since this database is shared, which is why
-I've chosen to provide an opt-in implementation in Certify. For now, updating
-the database will be out of scope, because that requires a few utilties that are
-missing from Boost at this time (XML, JSON and an HTTP Client).
-
-Don't forget to star the repository: https://github.com/djarek/certify!
diff --git a/_posts/2019-04-02-MarshallsAprilUpdate.md b/_posts/2019-04-02-MarshallsAprilUpdate.md
deleted file mode 100644
index ca1727d0c..000000000
--- a/_posts/2019-04-02-MarshallsAprilUpdate.md
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: marshall
-title: Marshall's March Update
-author-id: marshall
----
-
-There are four main areas where I spend my time.
-
-* Libc++, where I am the "code owner"
-* WG21, where I am the chair of the Library Working Group (LWG)
-* Boost
-* Speaking at conferences
-
-This month, I spent far more time reviewing other people's code and preparing talks for conferences than the previous few months. The Boost release process consumed a fair chunk of time as well.
-
-# Libc++
-
-The big news is: we released LLVM 8 this month! (March 20th). You can get the sources and pre-built binaries from the [LLVM download page](http://releases.llvm.org/download.html#8.0.0), or wait for your system vendor to provide you with an update.
-
-As the "code owner" for libc++, I also have to review the contributions of other people to libc++, and evaluate and fix bugs that are reported. That's a never-ending task; there are new contributions every day.
-
-### LWG papers implemented this month.
-
-* [P0811](https://wg21.link/P0811) `std::midpoint` for integral and pointer types. This turned out to be *quite* involved, and spawned a [clang bug report](https://bugs.llvm.org/show_bug.cgi?id=40965). On the plus side, now I have a topic for a talk for CppCon this fall.
-
-Still to do, `std::midpoint` for floating point types. This is done, but it needs better tests.
-
-### LWG issues implemented this month
-
-* I didn't actually commit any LWG issue fixes this month. I worked with others on several bug fixes that landed, but not under my name.
-
-### LLVM features implemented this month (certainly incomplete)
-
-* Add noexcept to `operator[]` for `array` and `deque`
-* Mark `vector::operator[]` and `front`/`back` as noexcept
-* Mark `front()` and `back()` as noexcept for `array`/`deque`/`string`/`string_view`
-* Make `to_chars`/`from_chars` work back to C++11. This lets us use them in `to_string`.
-
-
-### LLVM bugs resolved this month (probably incomplete)
-
-* [Bug 35967](https://llvm.org/PR35967) <regex> `syntax_option_type` is not a proper bitmask
-* _No bug #_ Fix a minor bug with `std::next` and `prev` not handling negative numbers.
-* _No bug #_ Cleanup of requirements for `optional` - we no longer allow `optional`
-* [Bug 41130](https://llvm.org/PR41130) `operator/` of `std::chrono::duration` and custom type.
-
-Also, there was a series of general cleanups in the libc++ tests to improve portability and readability. Eric and I (mostly Eric) revamped the debug-mode support, and there will be more activity there in the future. Also, we're moving towards using more of the `ASSERT_XXXX` macros for readability, and I revamped about 30 of the tests to use them. Only several thousand to go!
-
-
-The current status of libc++ can be found here:
-* [C++20 status](https://libcxx.llvm.org/cxx2a_status.html)
-* [C++17 status](https://libcxx.llvm.org/cxx1z_status.html)
-* [C++14 status](https://libcxx.llvm.org/cxx1y_status.html) (Complete)
-* [Libc++ open bugs](https://bugs.llvm.org/buglist.cgi?bug_status=__open__&product=libc%2B%2B)
-
-
-
-# WG21
-
-The "winter" WG21 meeting was held in Kona, HI on February 18-24. This was the last meeting for new features for C++20, and as such, it was both contentious and very busy.
-
-Between now and the next meeting (July), LWG will be working on reviewing papers and issues to be adopted in July. We have had three teleconferences since Kona, and a fourth is scheduled for mid-April.
-
-I am working on more "cleanup" papers similar to [P1458 - Mandating the Standard Library: Clause 16 - Language support library](https://wg21.link/P1458), and my [P0805 - Comparing Containers](https://wg21.link/P0805) needs an update.
-
-The goal of the July meeting is to have a "Committee Draft" (CD) of the proposed C++20 standard that can be sent out for review.
-
-
-# Boost
-
-It's time for another Boost release (1.70), and I am acting as the release manager again. The release calendar is available (as always) on [the Boost website](https://www.boost.org/development).
-
-The cut-off for contributions for the release is 3-April, with a release candidate to follow close behind, and the actual release to happen on the 10th.
-
-Once the release is over, I'll be putting some serious time into Boost.Algorithm; there are a bunch of C++17/20 algorithms that can be added to the library (among other things).
-
-# Conferences
-
-I had submitted talk proposals to three conferences, and all three were accepted.
-
-I will be speaking at:
-
-* [LLVM European Developer's Conference](https://llvm.org/devmtg/2019-04), April 8-9 in Brussels
-* [ACCU](https://conference.accu.org), April 10-13 in Bristol
-* [CppNow](http://www.cppnow.org), May 5-10 in Aspen, CO
-
diff --git a/_posts/2019-04-04-DamiansAprilUpdate.md b/_posts/2019-04-04-DamiansAprilUpdate.md
deleted file mode 100644
index da6439e16..000000000
--- a/_posts/2019-04-04-DamiansAprilUpdate.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, damian
-title: Damian's March Update
-author-id: damian
----
-
-This month I've been working on the following projects:
-- Certify
-- Boost.Beast
-- Boost.Build
-- BeastLounge
-
-# Certify
-Certify now properly verifies the hostname of a TLS server according to RFC 2818
-or TLS-DANE if available. Additionally, initial support for CRLSets has been
-merged, although it's still missing integration into the verification code.
-
-I've also invested a fair bit of time into researching what other open source
-libraries do to perform certificate status checking. I've looked into BoringSSL,
-mbedTLS, Botan and even the Go standard library. It's interesting that no
-library has a default way of performing the status check of a certificate and
-it's left up to the user.
-
-The Windows implementation of the certificate store in Certify will now properly
-use the entire chain passed by the peer, which resolves certificate failures in
-less common cases.
-
-Don't forget to star the repository: [https://github.com/djarek/certify](https://github.com/djarek/certify)!
-
-# Boost.Beast
-Most of the work this month involved making Beast compile faster and use less
-memory by expanding the code that can use split compilation and reducing redundant
-dependencies in a few places.
-
-# Boost.Build
-I've worked on implementing 2 improvements that make it less painful to work with b2:
-- support for finding OpenSSL
-- support for sanitizers in gcc and clang
-Both are currently still in review.
-
-# BeastLounge
-The project lacked functioning CI so I implemented one. Since the project was
-previously only compiled on MSVC, this proved to be quite challenging, because
-MSVC accepts code that is not valid C++11. I've also created a deployment docker
-image, which allows running the application in popular cloud environments, like
-Heroku. A development version of the app is available at [https://beast-lounge.herokuapp.com/](https://beast-lounge.herokuapp.com/).
diff --git a/_posts/2019-05-01-MarshallsMayUpdate.md b/_posts/2019-05-01-MarshallsMayUpdate.md
deleted file mode 100644
index 35e54d2d0..000000000
--- a/_posts/2019-05-01-MarshallsMayUpdate.md
+++ /dev/null
@@ -1,93 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: marshall
-title: Marshall's April Update
-author-id: marshall
----
-
-There are four main areas where I spend my time.
-
-* Libc++, where I am the "code owner"
-* WG21, where I am the chair of the Library Working Group (LWG)
-* Boost
-* Speaking at conferences
-
-# Libc++
-
-The next big milestone for libc++ is the LLVM 9.0 release this summer. We're working towards that, implementing new features and fixing bugs.
-
-As the "code owner" for libc++, I also have to review the contributions of other people to libc++, and evaluate and fix bugs that are reported. That's a never-ending task; there are new contributions every day.
-
-### LWG papers implemented this month.
-
-* [P0811](https://wg21.link/P0811) Add `std::midpoint` and `std::lerp` for C++20
-
-
-### LWG issues resolved this month
-
-* [2960](https://wg21.link/lwg2960) nonesuch is insufficiently useless
-* [2977](https://wg21.link/lwg2977) unordered_meow::merge() has incorrect Throws: clause
-* [2164](https://wg21.link/lwg2164) What are the semantics of `vector.emplace(vector.begin(), vector.back())`?
-
-
-### LLVM features implemented this month (certainly incomplete)
-
-* Fixed the implementations of `list::remove_if` and `list::unique` to deal with values or predicates that are elements in the list. Same for `forward_list`. We did this for `remove` already, but now we do it for the other operations as well.
-
-* Added a bunch of new tests for things that we were missing
-** `list::sort` and `forward_list::sort` are required to be stable.
-** You can't use `match_results` until you've done a regex search. Our tests did this in several places; now we have assertions to prevent that.
-
-
-### LLVM bugs resolved this month (probably incomplete)
-
-
-* [Bug 41323](https://llvm.org/PR41323) Race condition in `steady_clock::now` for `_LIBCPP_WIN32API`
-* [Bug 41130](https://llvm.org/PR41130) `operator/` of `std::chrono::duration` and custom type.
-* [Bug 41577](https://llvm.org/PR41577) test/std/utilities/optional/optional.object/optional.object.ctor/move.fail.cpp has wrong assumption.
-* I spent a fair amount of time on [Bug 39696](https://llvm.org/PR39696) "Workaround "error: '(9.223372036854775807e+18 / 1.0e+9)' is not a constant expression"; which turned out to be a GCC bug on PowerPC machines.
-
-
-Also, there was a series of general cleanups in the libc++ tests to improve portability and readability. I added a bunch of updates for debug-mode, and there were several places where we assumed that `string::compare` returned `-1/0/1` instead of what was specified, which is `\<0/0/\>0`. Also, I added tests for `std::any_cast` and array types.
-
-
-The current status of libc++ can be found here:
-* [C++20 status](https://libcxx.llvm.org/cxx2a_status.html)
-* [C++17 status](https://libcxx.llvm.org/cxx1z_status.html)
-* [C++14 status](https://libcxx.llvm.org/cxx1y_status.html) (Complete)
-* [Libc++ open bugs](https://bugs.llvm.org/buglist.cgi?bug_status=__open__&product=libc%2B%2B)
-
-
-
-# WG21
-
-There were no WG21 meetings in April. However, LWG held three teleconferences this month, reviewing papers in advance of the July meeting. We'll have more teleconferences in May.
-
-I am working on more "cleanup" papers similar to [P1458 - Mandating the Standard Library: Clause 16 - Language support library](https://wg21.link/P1458), and my [P0805 - Comparing Containers](https://wg21.link/P0805) needs an update.
-
-The goal of the July meeting is to have a "Committee Draft" (CD) of the proposed C++20 standard that can be sent out for review.
-
-Also on my TODO list is to attempt to implement some of the proposals that are coming up for a vote in July (`flat_map`, text formatting, etc).
-
-# Boost
-
-We released [Boost 1.70](https://www.boost.org/users/history/version_1_70_0.html) on the 12th of April.
-
-Once again, I was the release manager, which involved a bunch of "process management"; things like assembling the release candidates, packaging up release notes, deciding which problems that came up would be fixed (and which ones would not), and updating the web site (and so on, and so on).
-
-
-
-# Conferences
-
-This was a big travel month. I gave two presentations:
-
-* At the [LLVM European Developer's conference](https://llvm.org/devmtg/2019-04/) in Brussels, I gave a 30 minute overview of the changes that were coming to the standard library for C++20.
-
-* At [ACCU](https://conference.accu.org/) in Bristol, England, I gave a talk titled ["Navigating the development and evolution of a library"](https://conference.accu.org/2019/sessions.html#XNavigatingthedevelopmentandevolutionofalibrary)
-
-
-In May, I will be speaking at:
-* [CppNow](http://www.cppnow.org), May 5-10 in Aspen, CO
-
-I have submitted a talk for [CppCon](https://www.cppcon.com) in September, but I will not hear back about this for a month or two.
diff --git a/_posts/2019-05-05-DamiansMayUpdate.md b/_posts/2019-05-05-DamiansMayUpdate.md
deleted file mode 100644
index 9d6064735..000000000
--- a/_posts/2019-05-05-DamiansMayUpdate.md
+++ /dev/null
@@ -1,50 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, damian
-title: Damian's April Update
-author-id: damian
----
-
-This month I've been working on the following projects:
-- Certify
-- Boost.Beast
-- Boost.Build
-- BeastLounge
-
-# Certify
-Certify did not have any platform-independent means of caching certificate
-status (i.e. revoked, valid, unknown), so I implemented one. For now it has to
-be manually filled, but I'll add a way to import a static blacklist (somewhat
-similar to the builtin blacklist in Chrome) and query the status of a
-certificate. Unfortunately there is no way to handle OCSP stapling within the
-verification callback invoked by OpenSSL which is quite detrimental to
-usability. Additionally, OpenSSL doesn't have a way of starting and waiting for
-an asynchronous operation within callbacks (without blocking).
-
-Don't forget to star the repository: [https://github.com/djarek/certify](https://github.com/djarek/certify)!
-
-# Boost.Beast
-When working on making sure Beast is `std::launder`-correct, I ran into a number
-of previously undiagnosed bugs in Beast. All of them have been fixed in
-[v254](https://github.com/boostorg/beast/pull/1598/files). I was quite confused
-why these issues weren't found by CI previously. I've been able to track it down
-to old toolchain versions in Travis. Additionally, the test matrix lacks a few
-fairly important variants. Considering the fact that Trusty is no longer
-supported and the switch to Xenial is inevitable, I've decided to port over the
-CI to Azure Pipelines, because it offers better concurrency which allows the
-Beast CI to afford a larger test matrix. In the process I've also decided to use
-as many default b2 options as possible, to make future changes to the CI easier.
-There's still an issue with Valgrind in Xenial to be resolved (doesn't support
-the `RDRAND` instruction).
-
-# Boost.Build
-While working on the AzP CI for Beast, I found out that the `coverage` feature
-in `b2` doesn't actually set build flags. `coverage=all` will now properly cause
-tests to produce `gcno` and `gcda` files for consumption by the lcov tool.
-
-# BeastLounge
-When experimenting with the BeastLounge application running on Heroku I found
-out that Heroku's router has a builtin 55s timeout which dropped websocket
-connections. I solved the issue by making the websocket ping timeouts
-configurable.
diff --git a/_posts/2019-06-01-MarshallsJuneUpdate.md b/_posts/2019-06-01-MarshallsJuneUpdate.md
deleted file mode 100644
index c97aebf31..000000000
--- a/_posts/2019-06-01-MarshallsJuneUpdate.md
+++ /dev/null
@@ -1,81 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: marshall
-title: Marshall's May Update
-author-id: marshall
----
-
-There are four main areas where I spend my time.
-
-* Libc++, where I am the "code owner"
-* WG21, where I am the chair of the Library Working Group (LWG)
-* Boost
-* Speaking at conferences
-
-# Libc++
-
-The next big milestone for libc++ is the LLVM 9.0 release this summer. We're working towards that, implementing new features and fixing bugs.
-
-As the "code owner" for libc++, I also have to review the contributions of other people to libc++, and evaluate and fix bugs that are reported. That's a never-ending task; there are new contributions every day.
-
-This month was spent concentrating on code reviews and bug reports; so I implemented very little "new code".
-
-There was a lot of "infrastructure work" done on libc++ this month; a large cleanup of the test suite (still in progress), a bunch of work on debug mode for the library (also still in progress)
-
-### LWG issues resolved this month in libc++
-
-* [3204](https://wg21.link/lwg3204) `sub_match::swap` only swaps the base class
-
-### LLVM features implemented this month (certainly incomplete)
-
-* Improved the behavior of the compiler intrinsic `__is_base_of`. Clang no longer generates an error when you ask about inheritance relationships with unions, even if the non-union class is incomplete. This intrinsic is used by libc++ to implement `std::is_base_of`.
-
-* Fixed a few `regex` bugs, and improved the `regex` tests in C++03.
-
-### LLVM bugs resolved this month (probably incomplete)
-
-* [Bug 42037](https://llvm.org/PR42037) C++2a `std::midpoint`'s "Constraints" are not implemented
-* [Bug 41876](https://llvm.org/PR41876) `std::hash` should not accept `std::basic_strings` with custom character traits
-
-
-The current status of libc++ can be found here:
-* [C++20 status](https://libcxx.llvm.org/cxx2a_status.html)
-* [C++17 status](https://libcxx.llvm.org/cxx1z_status.html)
-* [C++14 status](https://libcxx.llvm.org/cxx1y_status.html) (Complete)
-* [Libc++ open bugs](https://bugs.llvm.org/buglist.cgi?bug_status=__open__&product=libc%2B%2B)
-
-
-
-# WG21
-
-There were no WG21 meetings in April. However, LWG held one teleconference this month, reviewing papers in advance of the July meeting. We'll have more teleconferences in June.
-
-I am working on more "cleanup" papers similar to [P1458 - Mandating the Standard Library: Clause 16 - Language support library](https://wg21.link/P1458), and my [P0805 - Comparing Containers](https://wg21.link/P0805) needs an update.
-
-The goal of the July meeting is to have a "Committee Draft" (CD) of the proposed C++20 standard that can be sent out for review.
-
-Also on my TODO list is to attempt to implement some of the proposals that are coming up for a vote in July (`flat_map`, text formatting, etc).
-
-# Boost
-
-It's been a quiet month for boost (except for C++ Now, the conference formerly known as BoostCon).
-
-There are a couple of good trip reports for C++Now:
-* [Matthew Butler](https://maddphysics.com/2019/05/13/cnow-2019-trip-report/)
-* [JeanHeyd Meneide](https://thephd.github.io/c++now-2019-trip-report)
-
-The next [Boost release cycle](https://www.boost.org/development/index.html) is starting soon; with the deadline for new libraries coming up later this month. I'm hoping to mentor a new release manager with this release.
-
-
-# Conferences
-
-Another travel month. I spent a bunch of time away from home, but only one conference:
-
-* At [C++ Now](http://www.cppnow.org) in Aspen, CO, I presented "The View from a C++ Standard Library Implementor", which was voted the runner-up for "Most Engaging" talk.
-
-I have submitted a talk for [CppCon](https://www.cppcon.com) in September, but I will not hear back about this for a month or two.
-
-I have submitted talks for [C++ Russia](https://cppconf-piter.ru/en/) and [Meeting C++](https://meetingcpp.com), which are both very close (timewise) to the Belfast WG21 meeting, but I haven't heard back yet.
-
-I am looking forward to being at home for the entire month of June.
diff --git a/_posts/2019-06-10-DamiansJuneUpdate.md b/_posts/2019-06-10-DamiansJuneUpdate.md
deleted file mode 100644
index b5e130211..000000000
--- a/_posts/2019-06-10-DamiansJuneUpdate.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, damian
-title: Damian's May Update
-author-id: damian
----
-
-This month I've been working on the following projects:
-- Certify
-- Boost.Beast
-- Boost.Build
-
-# Certify
-This month, I've worked on expanding the documentation of Certify, especially
-the example and introduction parts. When looking through the documentation for
-Boost.Build I found out it's possible to import snippets from *.cpp files
-into the documentation, which will allow me to make sure that snippets in
-the documentation compile and are tested. I've also attempted cleaning up the
-Certify build script to use the OpenSSL module in b2, but I ran into issues, so
-I'll have to get back to this one in the future.
-
-Don't forget to star the repository: [https://github.com/djarek/certify](https://github.com/djarek/certify)!
-
-# Boost.Beast
-I've been able to complete the port of the Beast CI to Azure Pipelines and expand
-the test matrix beyond what was tested in the existing CI infrastructure. Thanks
-to the expanded concurrent job limit, a full run on AzP takes less time than a
-full Travis and Appveyor build, especially when wait times are taken into account.
-One of the matrix items I added were tests for header-only no-deprecated builds,
-which turned out to be broken. Untested code has a nasty tendency to rot.
-I've also been able to identify some function templates in `http::basic_fields`
-which could be turned into regular functions. One of them was instantiated
-4 times because they were passed a predicate which was a lambda expression.
-These two changes turned out to be fairly significant, because they allowed
-shaving off at least 10 KiB of binary size per instantiation (amd64, -O3).
-
-# Boost.Build
-When working on the Azure Pipelines CI for Beast I noticed that b2 doesn't support
-the leak sanitizer, so I decided to add it. It's available via the `leak-sanitizer=on` feature.
diff --git a/_posts/2019-07-02-MarshallsJulyUpdate.md b/_posts/2019-07-02-MarshallsJulyUpdate.md
deleted file mode 100644
index a345ead35..000000000
--- a/_posts/2019-07-02-MarshallsJulyUpdate.md
+++ /dev/null
@@ -1,99 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: marshall
-title: Marshall's June Update
-author-id: marshall
----
-
-There are four main areas where I spend my time.
-
-* Libc++, where I am the "code owner"
-* WG21, where I am the chair of the Library Working Group (LWG)
-* Boost
-* Speaking at conferences
-
-# Libc++
-
-The next big milestone for libc++ is the LLVM 9.0 release this summer. We're working towards that, implementing new features and fixing bugs. The "Branch for release" is currently scheduled for July 18th.
-
-As the "code owner" for libc++, I also have to review the contributions of other people to libc++, and evaluate and fix bugs that are reported. That's a never-ending task; there are new contributions every day.
-
-I created a [status page](https://libcxx.llvm.org/upcoming_meeting.html) for the LWG issues and papers that are already set up for a vote at the Cologne WG21 meeting.
-
-
-### LWG issues resolved this month in libc++ (almost certainly incomplete)
-
-* [LWG2221](https://wg21.link/LWG2221) No formatted output operator for `nullptr`
-* [LWG3206](https://wg21.link/LWG3206) `year_month_day` conversion to `sys_days` uses not-existing member function
-
-
-### LLVM features implemented this month (almost certainly incomplete)
-
-* [P0553](https://wg21.link/P0553) Bit operations
-* [P0556](https://wg21.link/P0556) Integral power-of-2 operations
-* [P1355](https://wg21.link/P1355) Exposing a narrow contract for `ceil2`
-* [P0646](https://wg21.link/P0646) Improving the Return Value of Erase-Like Algorithms I
-
-
-### LLVM bugs resolved this month (probably incomplete)
-
-* [Bug 41843](https://llvm.org/PR41843) `std::is_base_of` should give correct result for incomplete unions
-* [Bug 38638](https://llvm.org/PR38638) Wrong constraint for `std::optional::operator=(U&&)`
-* [Bug 30589](https://llvm.org/PR30589) `std::complex` with a custom type does not work because of how std::__promote is defined
-* [Bug 42396](https://llvm.org/PR42396) Alignment not respected in containers for over-aligned enumeration types
-* [Bug 28704](https://llvm.org/PR28704) `num_get::do_get` incorrect digit grouping check
-* [Bug 18074](https://llvm.org/PR18074) Undefined references when using pointer to member functions
-* [Bug 26503](https://llvm.org/PR26503) `std::quoted` doesn't work with `char16_t` or `char32_t` strings.
-* [Bug 41714](https://llvm.org/PR41714) `std::tuple<>` is not trivially constructible
-* [Bug 36863](https://llvm.org/PR36863) `basic_string_view(const CharT*, size_type)` constructor shouldn't comment out assert of nullptr and length checks
-* [Bug 42166](https://llvm.org/PR42166) `to_chars` can put leading zeros on numbers
-
-
-### Other LLVM bits from this month (certainly incomplete)
-
-* [Revision 364545](https://llvm.org/r364545) Provide hashers for `string_view` only if they are using the default `char_traits`. Reported on [StackOverflow](https://stackoverflow.com/questions/56784597/is-libc-providing-hash-specialization-for-too-many-basic-string-views/56792608#56792608)
-
-* Reworked `to_string` to use `to_chars`. Much faster, and avoids having multiple implementations. This involved reworking `to_chars` so that it was available back to C++03. _I did all of the `to_chars` refactoring, but not the `to_string` rework._
-
-
-The current status of libc++ can be found here:
-* [C++20 status](https://libcxx.llvm.org/cxx2a_status.html)
-* [C++17 status](https://libcxx.llvm.org/cxx1z_status.html)
-* [C++14 status](https://libcxx.llvm.org/cxx1y_status.html) (Complete)
-* [Libc++ open bugs](https://bugs.llvm.org/buglist.cgi?bug_status=__open__&product=libc%2B%2B)
-
-
-# WG21
-
-The next WG21 meeting is July 15-20 in Cologne, Germany.
-
-There were no WG21 meetings in June. We (LWG) held four teleconferences this month, reviewing papers in advance of the July meeting, and will have another one next week.
-
-I had seven papers in the pre-Cologne mailing:
-* [P1718R0: Mandating the Standard Library: Clause 25 - Algorithms library](https://wg21.link/P1718)
-* [P1719R0: Mandating the Standard Library: Clause 26 - Numerics library](https://wg21.link/P1719)
-* [P1720R0: Mandating the Standard Library: Clause 28 - Localization library](https://wg21.link/P1720)
-* [P1721R0: Mandating the Standard Library: Clause 29 - Input/Output library](https://wg21.link/P1721)
-* [P1722R0: Mandating the Standard Library: Clause 30 - Regular Expression library](https://wg21.link/P1722)
-* [P1723R0: Mandating the Standard Library: Clause 31 - Atomics library](https://wg21.link/P1723)
-* [P1724R0: C++ Standard Library Issues to be moved in Cologne](https://wg21.link/P1724)
-
-The goal of the July meeting is to have a "Committee Draft" (CD) of the proposed C++20 standard that can be sent out for review.
-
-Also on my TODO list is to attempt to implement some of the proposals that are coming up for a vote in July (`flat_map`, text formatting, etc).
-
-# Boost
-
-The next [Boost release cycle](https://www.boost.org/development/index.html) is in process; I am helping Michael Caisse as release manager with this release.
-
-
-# Conferences
-
-Upcoming talks:
-* [C++ Russia](https://cppconf-piter.ru/en/) is at the end of October in St. Petersburg.
-* [Meeting C++](https://meetingcpp.com) is in mid-November in Berlin.
-
-I have submitted a talk for [CppCon](https://www.cppcon.com) in September, but I will not hear back about this for a month or two.
-
-I submitted a talk for [ACCU Autumn](https://conference.accu.org), which is in Belfast right after the WG21 meeting, but I haven't heard back about that yet. In any case, I will be attending this conference, since it's in the same hotel as the WG21 meeting, and starts two days after the WG21 meeting, and concludes right before Meeting C++.
diff --git a/_posts/2019-07-14-DamiansJulyUpdate.md b/_posts/2019-07-14-DamiansJulyUpdate.md
deleted file mode 100644
index 1154aad1d..000000000
--- a/_posts/2019-07-14-DamiansJulyUpdate.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, damian
-title: Damian's June Update
-author-id: damian
----
-
-This month I've been working on the following projects:
-- Certify
-- Boost.Beast
-
-# Certify
-After quite a bit of work exploring ways to make certificate verification more complete,
-I've concluded that Boost is currently missing a few tools to make that viable.
-A comprehensive solution requires, at the very least, a functional HTTP client
-able to handle higher-level semantics like redirects, proxies or compressed bodies.
-While these are unlikely to happen while performing an OCSP query or downloading
-a CRL set from Google's update service, they still need to be handled, otherwise
-the user will be left in no better state than when no library is used.
-At this point, Certify only offers basic verification, but that is still
-a similar level to what cURL does. Providing a comprehensive solution will require
-either an infrastructure solution (something like Google's CRLsets) or
-a library based one (i.e. build up all the required libraries to be able
-to perform proper certificate status checks).
-
-# Boost.Beast
-I've continued the work on expanding split compilation in Beast, by turning some
-internal function templates, in websocket code, into regular functions. Additionally,
-I've simplified the websocket prng code after proving with some benchmarks that the
-previous solution made it worse both for the fast case (with TLS enabled)
-and the slow one. The speed up is marginal, but it made the code much simpler
-and reduced the size of binaries by a small amount (a few K at best).
-I've also worked on cleaning up some of the compilation warnings that I found
-using the new Azure Pipelines CI in Beast. I also had to deal with an odd case of
-miscompilation under MSVC 14.2 (x64 Release), where the use of `static_string<7>`
-failed tests with partially garbage output while `static_string<8>` succeeded.
-
diff --git a/_posts/2019-08-05-MarshallsAugustUpdate.md b/_posts/2019-08-05-MarshallsAugustUpdate.md
deleted file mode 100644
index 557f2ee37..000000000
--- a/_posts/2019-08-05-MarshallsAugustUpdate.md
+++ /dev/null
@@ -1,118 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: marshall
-title: Marshall's July Update
-author-id: marshall
----
-
-There are four main areas where I spend my time.
-
-* Libc++, where I am the "code owner"
-* WG21, where I am the chair of the Library Working Group (LWG)
-* Boost
-* Speaking at conferences
-
-This month, the big news (and the big work item) was the approval of the C++ "Committee Draft" at the WG21 meeting in Cologne on July 15-20.
-
-You can think of this as a "beta version" of the C++20 standard; all features are complete. The next step is bug fixing, with an eye towards releasing next year.
-
-# Libc++
-
-The LLVM 9.0 release is on track for September. We have a release branch, and the RC1 was recently dropped.
-
-Because of the run-up and the aftermath of the Cologne meeting, the libc++ accomplishments are a bit sparse this month.
-
-As the "code owner" for libc++, I also have to review the contributions of other people to libc++, and evaluate and fix bugs that are reported. That's a never-ending task; there are new contributions every day.
-
-### LWG issues resolved this month in libc++ (almost certainly incomplete)
-
-* [LWG2273](https://wg21.link/LWG2273) `regex_match` ambiguity
-
-
-### LLVM features implemented this month (almost certainly incomplete)
-
-* [P1612](https://wg21.link/P1612) Relocate endian
-* [P1466](https://wg21.link/P1466) Parts of P1466 "Misc Chrono fixes" more to come here.
-
-
-### LLVM bugs resolved this month (definitely incomplete)
-
-
-
-### Other interesting LLVM bits from this month (certainly incomplete)
-
-* [Revision 365854](https://llvm.org/r365854) Reorganize the `` header to make most of the facilities available for internal use pre-C++20. NFC for external users.
-
-* [Revision 367120](https://llvm.org/r367120) Fix a bug in std::chrono::abs where it would fail when the duration's period had not been reduced.
-
-* [Revision 364884](https://llvm.org/r364884) Add an internal call `__libcpp_is_constant_evaluated`, which works like `std::is_constant_evaluated`, except that it can be called at any language level (back to C++98). For older languages, it just returns `false`. This gets rid of a lot of ifdefs in the libc++ source code.
-
-
-The current status of libc++ can be found here:
-* [C++20 status](https://libcxx.llvm.org/cxx2a_status.html)
-* [C++17 status](https://libcxx.llvm.org/cxx1z_status.html)
-* [C++14 status](https://libcxx.llvm.org/cxx1y_status.html) (Complete)
-* [Libc++ open bugs](https://bugs.llvm.org/buglist.cgi?bug_status=__open__&product=libc%2B%2B)
-
-
-# WG21
-
-As I said above, we shipped a CD out of Cologne. Now we wait for the National Bodies (members of ISO, aka "NBs") to review the draft and send us comments. When we've resolved all of these comments, we will send the revised draft out for balloting. If the NBs approve, then that draft will become C++20.
-
-We approved many new features for C++20 in Cologne:
-* [P0553](https://wg21.link/0553) - Bit Operations
-* [P0980](https://wg21.link/0980) - Constexpr `string`
-* [P1004](https://wg21.link/1004) - Constexpr `vector`
-* [P1065](https://wg21.link/1065) - Constexpr `INVOKE`
-* [P1135](https://wg21.link/1135) - The C++20 Synchronization Library
-* [P1208](https://wg21.link/1208) - Source Location
-* [P0645](https://wg21.link/0645) - Text Formatting
-* [P1361](https://wg21.link/1361) - Integration of `chrono` with text formatting
-* [P1754](https://wg21.link/1754) - Rename concepts to standard\_case for C++20, while we still can
-* [P1614](https://wg21.link/1614) - Spaceship integration in the Standard Library
-* [P0600](https://wg21.link/0600) - Stop Tokens and a Joining Thread
-* [P0631](https://wg21.link/0631) - Math Constants
-
-We also did not approve many proposed features. Most of these were not approved because we ran out of time, rather than any fault of theirs:
-* [P1391](https://wg21.link/1391) - Range constructors for `string_view`
-* [P1394](https://wg21.link/1394) - Range constructors for `span`
-* [P0288](https://wg21.link/0288) - `any_invokable`
-* [P0201](https://wg21.link/0201) - `polymorphic_value`
-* [P0429](https://wg21.link/0429) - A Standard flatmap
-* [P1222](https://wg21.link/1222) - A Standard flatset
-* [P0533](https://wg21.link/0533) - constexpr for cmath
-* [P0792](https://wg21.link/0792) - `function_ref`
-* [P0881](https://wg21.link/0881) - A Proposal to add stacktrace library
-* [P1272](https://wg21.link/1272) - Byte-swapping
-* [P0627](https://wg21.link/0627) - Function to mark unreachable code
-* _and many others_
-
-
-I still have a bunch of mechanical changes that need to be made before we ship:
-* [P1718R0: Mandating the Standard Library: Clause 25 - Algorithms library](https://wg21.link/P1718)
-* [P1719R0: Mandating the Standard Library: Clause 26 - Numerics library](https://wg21.link/P1719)
-* [P1720R0: Mandating the Standard Library: Clause 28 - Localization library](https://wg21.link/P1720)
-* [P1721R0: Mandating the Standard Library: Clause 29 - Input/Output library](https://wg21.link/P1721)
-* [P1722R0: Mandating the Standard Library: Clause 30 - Regular Expression library](https://wg21.link/P1722)
-* [P1723R0: Mandating the Standard Library: Clause 31 - Atomics library](https://wg21.link/P1723)
-
-We polled the NBs before Cologne, and they graciously agreed to have these changes made post-CD.
-
-# Boost
-
-The next [Boost release cycle](https://www.boost.org/development/index.html) is in process; I am helping Michael Caisse as release manager with this release. We should have a release in the next couple of weeks.
-
-
-# Conferences
-
-Upcoming talks:
-* [CppCon](https://www.cppcon.com) in September in Denver.
-* [C++ Russia](https://cppconf-piter.ru/en/) is at the end of October in St. Petersburg.
-* [ACCU Autumn](https://conference.accu.org) is right after the WG21 meeting in early November.
-* [Meeting C++](https://meetingcpp.com) is in mid-November in Berlin.
-
-I will be making the "Fall 2019 C++ European Tour", going from St. Petersburg to Belfast to Berlin before heading home mid-November.
diff --git a/_posts/2019-08-19-DamiansAugustUpdate.md b/_posts/2019-08-19-DamiansAugustUpdate.md
deleted file mode 100644
index 7d3f01318..000000000
--- a/_posts/2019-08-19-DamiansAugustUpdate.md
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, damian
-title: Damian's July Update
-author-id: damian
----
-
-# Boost.Beast
-
-I've started working on improvements to the zlib part of Beast. There are some gaps
-in the test harness of these components, so I've decided to increase coverage.
-As a first step, I started porting test cases from the original zlib library's tests,
-to verify that existing code matches the expected behavior of the original library.
-Fortunately, I've not found any significant discrepancies, there's only one issue
-where Beast rejects malformed input for the wrong reason (I'm still looking into it
-whether it's actually an issue at the time of writing this).
-
-I've also looked into providing more meaningful feedback from test failures in Beast,
-especially when they're run in CI. While the current test framework does print
-a line number on failure, the line number is often in a function template that's called
-by multiple test cases, which makes it quite hard to determine which test failed
-just from the log, often requiring the use of a debugger. Doing that locally
-may not be a problem, but it's significantly harder in CI, so I've decided to
-try to use Boost Stacktrace to provide a callstack on each failure in Beast tests.
-Additionally, I've also worked on running the test suite without OpenSSL installed,
-to hopefully fix some of the failures in the official Boost test matrix.
-
-# The question of Networking TS and TLS
-
-There's recently been quite a bit of discussion of networking being useless
-without "secure by default" sockets. Since this is a recurring topic and I expect it to return in the future,
-I've decided to write up an analysis of this issue.
-
-First of all, I believe that an attempt to deliver a "secure by default" socket
-within the current networking proposal right now will result in something like
-`std::async` - not really practically useful.
-
-What kind of TLS facilities I'd consider useful for the average user of the standard library?
-A reasonable guideline, I think, are ones I could trust to be used in a distributed
-system that handles money (in any form).
-Note that TLS is not only a protocol that provides confidentiality (i.e. encryption),
-but also allows verification of the identity of the server by the client, or of both peers.
-Remember, it doesn't matter if 3rd parties can't see what you're sending,
-if you're sending your data to the wrong peer in the first place!
-
-While it may seem simple at first look, just verifying the identity of a peer
-is an extremely complex process, as my experience with Certify has shown.
-Doing it portably and efficiently with the same interface and effects is extremely difficult.
-Browsers resort to all kinds of workarounds and custom solutions to be able
-to securely implement just this one aspect of TLS. I attempted to implement
-a library (intended for inclusion into Boost) that would perform this one aspect,
-however, I found it to be impossible to provide a practical solution with
-the current state of the networking ecosystem in Boost. In fact, one method
-of certificate verification (via the OCSP protocol) requires a (very) basic
-HTTP client. Yes, in order to perform a TLS handshake and verify the peer's
-certificate status using OCSP, you need an HTTP client.
-
-This is just one aspect of the TLS protocol that needs to be addressed.
-There are others as well - what about the basic cryptographic building blocks,
-like ciphers, hashing algorithms, PRFs and so on - they are bound to be used
-in a hypothetical implementation in a standard library, should they be exposed? If yes, then with what interface?
-Considering that there are no standard networking facilities and not even a proposal for standard TLS,
-this is a discussion that would essentially postpone standard networking indefinitely.
-
-Finally, there's also an opposite position that no networking should be
-in the standard at all. I disagree with this position - networking has become a very important
-part of many C++ projects (in my career, all C++ projects I dealt with, touched
-some sort of network in one way or another).
-At the very least we need standard named requirements for library compatibility, since that is
-severely lacking in the ecosystem at this point.
diff --git a/_posts/2019-09-01-Gold-sponsor-of-Cppcon-2019.md b/_posts/2019-09-01-Gold-sponsor-of-Cppcon-2019.md
deleted file mode 100644
index 83e65b46d..000000000
--- a/_posts/2019-09-01-Gold-sponsor-of-Cppcon-2019.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
----
-
-The Alliance is a Gold sponsor for
-CppCon 2019. This
-conference is the annual, week-long face-to-face gathering for the
-entire C++ community. The conference is organized by the C++ community
-for the community. Attendees enjoy inspirational talks and a friendly
-atmosphere designed to help individuals learn from each other, meet
-interesting people, and generally have a stimulating experience.
-
diff --git a/_posts/2019-09-27-MarshallsOctoberUpdate.md b/_posts/2019-09-27-MarshallsOctoberUpdate.md
deleted file mode 100644
index ab898bed4..000000000
--- a/_posts/2019-09-27-MarshallsOctoberUpdate.md
+++ /dev/null
@@ -1,100 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: marshall
-title: Marshall's Combined August and September Update
-author-id: marshall
----
-
-There are four main areas where I spend my time.
-
-* Libc++, where I am the "code owner"
-* WG21, where I am the chair of the Library Working Group (LWG)
-* Boost
-* Speaking at conferences
-
-Lots of work these month(s) behind the scenes, getting stuff ready for C++20, LLVM 9, and Boost 1.71.0.
-
-# Libc++
-
-The [LLVM 9.0 release](http://releases.llvm.org/download.html#9.0.0) has shipped! The release date was 19-September, a few days later than planned. There are a lot of new [libc++ features](http://releases.llvm.org/9.0.0/projects/libcxx/docs/ReleaseNotes.html) in the release.
-
-As the "code owner" for libc++, I also have to review the contributions of other people to libc++, and evaluate and fix bugs that are reported. That's a never-ending task; there are new contributions every day.
-
-Many times, bug reports are based on misunderstandings, but require a couple of hours of work in order to figure out where the misunderstanding lies.
-
-We're working on a major redesign of the "debug mode" for libc++, after we realized that the existing (not widely used) debug mode is useless when you're trying to do things at compile (constexpr) time.
-
-I have been spending a lot of time the last few weeks working on the calendaring stuff in `<chrono>`, specifically the interface with the OS for getting time zone information. It is a surprisingly complicated task. Fortunately for me, I have a friend who has been down this road in the past, and is willing to answer questions.
-
-### LWG issues resolved in libc++ (almost certainly incomplete)
-
-* [LWG3296](https://wg21.link/LWG3296) Add a missing default parameter to `regex::assign`
-
-### LLVM features implemented (almost certainly incomplete)
-
-* [P1466](https://wg21.link/P1466) Parts of P1466 "Misc Chrono fixes" more to come here.
-
-### LLVM bugs resolved (definitely incomplete)
-
-* [Bug 42918](https://llvm.org/PR42918) Fix thread comparison by making sure we never pass our special 'not a thread' value to the underlying implementation
-
-* [Bug 43063](https://llvm.org/PR43063) Fix a couple of unguarded `operator,` calls in algorithm
-
-* [Bug 43034](https://llvm.org/PR43034) Add a missing `_VSTD::` before a call to `merge`.
-
-* [Bug 43300](https://llvm.org/PR43300) Add a missing `_VSTD::` Only initialize the streams `cout`/`wcout`/`cerr`/`wcerr` etc once, rather than any time `Init::Init` is called
-
-### Other interesting LLVM bits from (certainly incomplete)
-
-* [Revision 368299](https://llvm.org/r368299) Implement `hh_mm_ss` from [P1466](https://wg21.link/P1466). Part of the ongoing `<chrono>` implementation work.
-
-
-The current status of libc++ can be found here:
-* [C++20 status](https://libcxx.llvm.org/cxx2a_status.html)
-* [C++17 status](https://libcxx.llvm.org/cxx1z_status.html)
-* [C++14 status](https://libcxx.llvm.org/cxx1y_status.html) (Complete)
-* [Libc++ open bugs](https://bugs.llvm.org/buglist.cgi?bug_status=__open__&product=libc%2B%2B)
-
-
-# WG21
-
-We shipped a CD out of Cologne in July. Now we wait for the National Bodies (members of ISO, aka "NBs") to review the draft and send us comments. When we've resolved all of these comments, we will send the revised draft out for balloting. If the NBs approve, then that draft will become C++20.
-
-The next WG21 meeting will be November 2-8 in Belfast, Northern Ireland.
-This will be the first of two meetings that are focused on resolving NB comments; the second one will be in Prague in February.
-
-I have several "clean-up" papers for the Belfast mailing. The mailing deadline is a week from Monday (5-October), so I need to finish them up.
-
-* [P1718R0: Mandating the Standard Library: Clause 25 - Algorithms library](https://wg21.link/P1718)
-* [P1719R0: Mandating the Standard Library: Clause 26 - Numerics library](https://wg21.link/P1719)
-* [P1720R0: Mandating the Standard Library: Clause 28 - Localization library](https://wg21.link/P1720)
-* [P1721R0: Mandating the Standard Library: Clause 29 - Input/Output library](https://wg21.link/P1721)
-* [P1722R0: Mandating the Standard Library: Clause 30 - Regular Expression library](https://wg21.link/P1722)
-* [P1723R0: Mandating the Standard Library: Clause 31 - Atomics library](https://wg21.link/P1723)
-
-We polled the NBs before Cologne, and they graciously agreed to have these changes made post-CD.
-
-# Boost
-
-[Boost 1.71.0](https://www.boost.org/users/history/version_1_71_0.html) was released on 19-August. Michael Caisse was the release manager, with some help from me.
-
-As part of the Boost Community maintenance team, I (and others) made many changes to libraries whose authors are no longer able (or interested) in maintaining them.
-
-I have a couple of suggestions for additions to the Boost.Algorithms library that I will be working on in the near future.
-
-
-# Conferences
-
-I was a speaker at [CppCon](https://www.cppcon.com) last week. I gave a new talk "std::midpoint - How hard could it be?" (no link yet) which was quite well received. I got a few questions that will require additional research, and may improve my implementation.
-
-I also participated in the "Committee Fireside Chat", at CppCon, where conference members get to ask questions of the committee members who are present.
-
-
-Upcoming talks:
-* [LLVM Developer's Conference](http://llvm.org/devmtg/2019-10/) is in San Jose in October. I will not be speaking, but I will be moderating the lightning talks.
-* [C++ Russia](https://cppconf-piter.ru/en/) is at the end of October in St. Petersburg.
-* [ACCU Autumn](https://conference.accu.org) is right after the WG21 meeting in early November.
-* [Meeting C++](https://meetingcpp.com) is in mid-November in Berlin.
-
-I will be making the "Fall 2019 C++ European Tour", going from St. Petersburg to Belfast to Berlin before heading home mid-November.
diff --git a/_posts/2019-3-06-Gold-sponsor-of-C++-now.md b/_posts/2019-3-06-Gold-sponsor-of-C++-now.md
deleted file mode 100644
index dcc0a0618..000000000
--- a/_posts/2019-3-06-Gold-sponsor-of-C++-now.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
-title: Gold sponsor of C++Now 2019
----
-The Alliance is a Gold sponsor for
-C++Now 2019. This
-conference is a gathering of C++ experts and enthusiasts from around
-the world in beautiful Aspen, Colorado from May 5, 2019 - May 10, 2019.
diff --git a/_posts/2020-01-31-RichardsJanuaryUpdate.md b/_posts/2020-01-31-RichardsJanuaryUpdate.md
deleted file mode 100644
index 508069801..000000000
--- a/_posts/2020-01-31-RichardsJanuaryUpdate.md
+++ /dev/null
@@ -1,185 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's January Update
-author-id: richard
----
-
-# History
-
-This is my first entry on the C++ Alliance web site. I'm very happy to say that I was invited to join the organisation
-at the end of December last year.
-
-I first met Vinnie on Slack when I chose to use [Boost.Beast](https://github.com/boostorg/beast) in a
-greenfield project - a highly scalable market data distribution system and quoting gateway for the Japanese
-cryptocurrency exchange [liquid.com](http://liquid.com).
-
-There were a number of candidates for C++ HTTP frameworks and it is interesting for me to examine the decision-making
-process I went through in choosing one.
-
-If I am honest, there are two main factors that influenced me towards Boost.Beast:
-
-1. I am a long-time fanboi of [Boost.Asio](https://github.com/boostorg/asio). I find its paradigm very pleasing.
-Once you decipher the (extremely terse!) documentation it becomes obvious that it was written by a hyper-intelligent
-extraterrestrial masquerading as a human being.
-
-2. I have used the [Boost Library](https://www.boost.org/) (or more correctly, libraries) for many years.
-Boost has become synonymous with trust, quality and dependability. As far as I have always been concerned,
-boost is *the* standard. The standard library has always been a pale shadow of it.
-
-When I started the new project there was an expectation that I would have a team to work with. In the end, I found
-myself writing the entire system alone from scratch.
-
-Liquid Tap contains two fully-featured web servers (one facing the organisation and one facing the outside world),
-supports inbound and outbound websocket connectivity (with per-fqdn keepalive connection pooling) and multi-threaded
-operation. The project took me 3 months from writing the first line of code to full production readiness.
-
-I was personally impressed by the speed with which I was able to assimilate a new library and create a fairly complex
-service infrastructure using nothing more than boost, nlohmann_json, openssl and a c++17 compiler. In my view
-this would not have been possible without the excellent documentation and careful attention to detail in Boost.Beast.
-
-During the development, I reached out to Vinnie and Damian on Slack a number of times. Both were always helpful
-and attentive. Without a doubt they were instrumental in the success of my project.
-
-So here I am in January 2020. Just like the old TV advert where Victor Kiam was so impressed with his electric
-shaver that, "I bought the company".
-
-I was so impressed with Boost.Beast and its creators that when given the chance, I chose to join the company.
-
-# First Month
-
-I have spent my first month with the firm going through Vinnie's Boot Camp. It's been an interesting and invigorating
-experience.
-
-I have many years of experience writing code for production environments, from embedded systems like slot machines
-and video games to defence to banking and trading systems. I'm fairly confident that if you can describe it, I can
-write it.
-
-But maintaining a publicly-facing library is a new and very different challenge.
-
-## Controlling the Maintenance Burden
-
-C++ is a language in which types are cheap. So cheap in fact that many people (including me) recommend describing any
-concept in a program as its own type. This is a great way to cause logic errors to fail to compile, rather than fail
-to impress your customers.
-
-But in a library such as Boost.Beast, every public type you create is a type you must document and maintain. If you
-discover that your type has a design flaw, you're a little stuck. Any future changes need to be source-compatible
-with previous versions of the library. Dangerous or incorrect elements in the design must be deprecated gracefully.
-All this requires careful management.
-
-Management takes time.
-
-Something I learned from Vinnie very quickly is that for this reason, interfaces to public objects should provide
-the bare minimum functionality that users can demonstrate a need for. Adding a new public method or class should only
-happen after careful consideration of the consequences.
-
-## Supporting all Toolchains
-
-Another consideration I have never had to encounter before is that Boost.Beast is designed to work on every compiler
-that "robustly supports" C++11.
-
-In my career as a software engineer I have always demanded (and mostly had) autonomy over the choice of toolset. Of
-course I have always chosen the very latest versions of gcc, clang and msvc and used the very latest standard of
-c++ available. Why wouldn't I? It improves productivity, and who wants to be stuck in the Dark Ages when all your
-friends are using the new cool stuff?
-
-I wrote Liquid Tap in C++17. If C++2a had been more advanced and supported by all compilers at the time I'd have used
-that, because compiler-supported coroutines would have shortened the code and made debugging and reasoning about
-sequencing a whole lot easier.
-
-Now I find myself thinking about how to support the latest features of the language, while ensuring that the library
-will continue to function for the many C++11 and 14 users who have not been as fortunate as me and are still
-constrained by company policy, or indeed are simply happy not to have to learn the new features available in more
-recent standards.
-
-## Attention to Detail
-
-Boost.Beast strives for 100% (or as close to it as possible) coverage in testing. This is only logical. Users are not
-going to be happy if they have to continually decipher bugs in their programs caused by us, file bug reports and
-either patch their Boost source or wait up to three months for another release.
-
-In addition, documentation matters. You know how it is in a production system. More effort is spent on content and
-utility than documentation. Developers are often expected to read the code or ask someone if there's something they
-don't understand. Not so when maintaining a library for public consumption.
-
-One of the reasons I chose Boost.Beast was the quality, completeness and accuracy of its documentation. This is no
-accident. Vinnie and his team have put a lot of time into it. Only now am I becoming aware of what an Herculean
-task this is.
-
-Users will hold you to your word, so your word had better be the _Truth, the Whole Truth and Nothing But the Truth_.
-
-# Activities
-
-## Issue Maintenance
-
-This month I have been working through some of the Issue backlog in Boost.Beast. It's satisfying to see PRs getting
-accepted and the list of open issues being whittled away. At the moment it's interesting to see new issues and
-queries being raised too. I'll revisit this in next month's blog and report as to whether it's still interesting
-then :)
-
-I have also been taking time to liaise with users of the library when they raise queries via the
-[Issue Tracker](https://github.com/boostorg/beast/issues), email or the
-[Slack Channel](https://cpplang.slack.com/archives/CD7BDP8AX). I think staying in touch with the users is an excellent
-way to get feedback on the quality of documentation and design. It's also nice to be able to help people. Not something
-you get time to do very often when working on an FX-options desk in an investment bank.
-
-## Boost.Json
-
-I have been providing some support to the upcoming [Boost.JSON](https://github.com/vinniefalco/json) library.
-This library focusses on:
-* Absolute correctness in reference to [RFC8259](https://datatracker.ietf.org/doc/rfc8259/).
-* Seeking to match or exceed the performance of other c++ JSON libraries such as [RapidJSON](https://rapidjson.org/).
-* Providing a clean, unsurprising programming interface and impeccable documentation.
-
-This is a fascinating project for me. Various JSON libraries employ various tricks for improving performance.
-Performance can be gained at the expense of rigorous syntax checking, use of buffers and so on. Achieving the Holy
-Grail of full correctness, minimal state and absolute performance will be an interesting challenge.
-
-## Boost.URL
-
-Vinnie is also working on [Boost.URL](https://github.com/vinniefalco/url). While I have not contributed any code,
-spending my time in the `#beast` Slack channel has meant that I've been able to keep up to speed with the various
-design choices being made. Again, there has been much to learn about a design rationale that focuses heavily on
-the potential maintenance burden.
-
-There is actually a lot that could be learned by developers in industry from taking part in or observing this
-discourse.
-
-# Work Schedule
-
-The C++ Alliance is based on the West Coast of the USA, while I live and work in the tiny Principality of
-[Andorra](https://en.wikipedia.org/wiki/Andorra) in mainland Europe. This puts me some nine hours ahead of my
-colleagues across the Pond.
-
-It turns out that this is a perfect way of working for me. I can get up at 8am, nip out for a couple of hours skiing or
-hiking, enjoy lunch and then get to work - before anyone else in the firm is even awake.
-
-I'm a bit of a night-owl anyway, so working later in order to engage in "lively debate" with my colleagues on Slack
-is no problem. It also means I have a legitimate excuse to get out of any social engagements I don't want to be bothered
-with.
-
-So for me it's all win.
-
-# Summary
-
-I've really enjoyed my first month. I think Vinnie worries that he'll nag me too much about seemingly unimportant
-details like commit message wording and achieving a certain tone in code documentation, but I don't mind it.
-
-Boost.Beast is a fantastic piece of work. It's Vinnie's baby, and I am privileged to be asked to hold it in my
-hands.
-
-I'm never going to take issue with a mother looking to protect her cubs.
-
-Furthermore, having a legitimate excuse to interact with the other maintainers of Boost on Slack is a pleasure.
-These people are some of the brightest minds on the planet. I live in hope that some of this brilliance will
-rub off.
-
-If you work with C++, I highly recommend that you join the [Cpplang](http://slack.cpp.al) Slack channel.
-
-If you'd like to contact me to discuss my experiences I'd be happy to receive a message on Slack.
-
-Thanks for reading.
-
-Richard Hodges
diff --git a/_posts/2020-02-29-RichardsFebruaryUpdate.md b/_posts/2020-02-29-RichardsFebruaryUpdate.md
deleted file mode 100644
index 7f5893f95..000000000
--- a/_posts/2020-02-29-RichardsFebruaryUpdate.md
+++ /dev/null
@@ -1,89 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's February Update
-author-id: richard
----
-
-# The Lesser HTTP Refactor
-
-Aside from the normal answering of queries and [issues](https://github.com/boostorg/beast/issues), February has been a
-month dominated by the difference between the `asio::DynamicBuffer_v1` and `asio::DynamicBuffer_v2` concepts.
-
-As I understand things, both Beast and Asio libraries developed the idea of the `DynamicBuffer` concept (or more
-correctly, Named Type Requirement \[NTR\]) at roughly the same time, but with slightly different needs.
-
-The original Asio `DynamicBuffer` describes a move-only type, designed to be a short-lived wrapper over storage which
-would allow a composed operation to easily manage data insertions or retrievals from that storage through models of the
-`MutableBufferSequence` and `ConstBufferSequence` NTRs.
-
-In Beast, it was found that `DynamicBuffer` objects being move-only caused a difficulty, because the necessarily
-complex composed operations in Beast need to create a `DynamicBuffer`, perform operations on it, pass it to a
-sub-operation for further manipulation and then continue performing operations on the same buffer.
-
-If the `DynamicBuffer` has been passed by move to a sub operation, then before the buffer can be used again, it will
-have to be moved back to the caller by the callee.
-
-Rather than complicate algorithms, Beast's authors took a slightly different track - Beast `DynamicBuffer`s were specified
-to be pass-by-reference. That is, the caller is responsible for the lifetime of the `DynamicBuffer` and the callee is
-passed a reference.
-
-This satisfied Beast's needs but created an incompatibility with Asio and Net.TS.
-
-Vinnie Falco wrote a [paper](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1100r0.html) on the problem
-offering a solution to enabling complex composed operations involving `DynamicBuffer`s. On reflection, LEWG took a
-different view and solved the problem by
-[re-engineering](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1790r1.html) the `DynamicBuffer` NTR.
-
-The result is that Boost.Beast objects are now likely to encounter three versions of `DynamicBuffer` objects in the wild
-and needs to be able to cope gracefully.
-
-Boost.Asio now has the NTRs `DynamicBuffer_v1` and `DynamicBuffer_v2`, with the NTR `DynamicBuffer` being a synonym for
-either depending on configuration flags (defaulting to `DynamicBuffer_v2`).
-
-We have had to go a little further and add a new NTR in Beast, `DynamicBuffer_v0`. The meanings of these NTRs are:
-
-| NTR | Mapping in Asio | Mapping in previous Beast | Overview |
-| --- | --------------- | ------------------------- | -------- |
-| DynamicBuffer_v0 | none | DynamicBuffer | A dynamic buffer with a version 1 interface which must be passed by reference |
-| DynamicBuffer_v1 | DynamicBuffer_v1 | Asio DynamicBuffer (v1) | A dynamic buffer with a version 1 interface which must be passed by move |
-| DynamicBuffer_v2 | DynamicBuffer_v2 | none | A dynamic buffer with a version 2 interface which is passed by copy |
-
-My intention this month was to migrate the entire Beast code base to use Asio's (and current net.ts's) `DynamicBuffer_v2`
-concepts while still remaining fully compatible with `DynamicBuffer_v0` objects (which will be in existing user code).
-
-The first attempt sought to change as little of the Beast code as possible, by writing `DynamicBuffer_v0` wrappers for
-DynamicBuffer_v[1|2] objects, with those wrappers automatically created on demand in the initiating function of Beast IO
-operations. The problem with this approach is that it penalised the use of DynamicBuffer_v2 with an additional memory
-allocation (in order to manage a proxy of DynamicBuffer_v1's input and output regions). On reflection, it became
-apparent that in the future, use of `DynamicBuffer_v2` will be the norm, so it would be inappropriate for Beast to
-punish its use.
-
-Therefore, we have chosen to take the harder road of refactoring Beast to use `DynamicBuffer_v2` in all composed
-operations involving dynamic buffers, and refactor the existing `DynamicBuffer_v0` types in order to allow them to
-act as storage providers for `DynamicBuffer_v2` proxies while still retaining their `DynamicBuffer_v0` public
-interfaces.
-
-I had hoped to get all this done during February, but alas - in terms of released code - I only got as far as the
-refactoring of existing types.
-
-The code has been released into the [develop](https://github.com/boostorg/beast/tree/develop) branch as part of
-[Version 286](https://github.com/boostorg/beast/commit/c8a726f962b2fbf77d00b273b3c6fb0dd975a6b5).
-
-
-The refactor of HTTP operations and Websocket Streams in terms of `DynamicBuffer_v2` is underway and indeed mostly
-complete, but there was not sufficient time to release a sufficiently robust, reviewed and tested version this month.
-
-I plan to finish off this work in the first part of March, which will hopefully leave time for more interesting
-challenges ahead.
-
-# What's Next
-
-Well, the Beast [Issue Tracker](https://github.com/boostorg/beast/issues) is far from clear, so there is plenty of
-work to do.
-
-At some point though, I'd like to make a start on a fully featured higher level HTTP client library built on
-Boost.Beast.
-
-We'll have to see what unfolds.
diff --git a/_posts/2020-03-06-KrystiansFebruaryUpdate.md b/_posts/2020-03-06-KrystiansFebruaryUpdate.md
deleted file mode 100644
index bae87e619..000000000
--- a/_posts/2020-03-06-KrystiansFebruaryUpdate.md
+++ /dev/null
@@ -1,87 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: krystian
-title: Krystian's February Update
-author-id: krystian
----
-
-
-# Introduction
-
-
-
-I'm a first-year university student at the University of Connecticut pursuing a degree in Computer Science. I joined The C++ Alliance near the end of January as a part-time Staff Engineer at the recommendation of Vinnie, who has been mentoring me for the past several months. I’ve been programming in C++ for around 2 years now, with an interest in library development and standardization. My original reasoning for choosing C++ over other languages was a little misguided, being that it was generally regarded as a difficult language, and I liked the challenge. Prior to this, I had dabbled in the language a little bit, but I really didn’t get into it until I discovered the existence of the standard. A language with explicitly defined semantics, all specified in a document that is difficult to parse sounded very appealing to me, and thus I embarked on my journey to learn everything about it that I could. If you were to ask me what I like most about it now, it would probably be a tie between the “you don’t pay for what you don’t use” principle and zero cost generics.
-
-
-
-With regard to standardization, I do a lot of standardese bug-fixing in the form of editorial issues and defect reports, and I write papers mostly focused on fixing the language. As for library development, I’m currently working on [Boost.JSON](https://github.com/vinniefalco/json) and [Boost.StaticString](https://github.com/boostorg/static_string), the latter of which has been accepted into Boost.
-
-
-
-# Boost.JSON
-
-
-
-My work on Boost.JSON originally started with writing a documentation style guide and then applying it to the library. The first place where this documentation was applied was `json::string`; a non-template string type similar to `std::string`, but with a type erased allocator, used exclusively with `char`.
-
-
-
-The documentation of `json::string` proved to be no easy feat, due to its overwhelming number of overloaded member functions (`string::replace` itself has 12 overloads). Detailed documentation of these functions did prove to be a little tedious, and once I finished, it was clear that a better interface was needed.
-
-
-
-The standard interface for `std::string` is bound to a strict regimen of backward compatibility, and thus the interface suffers due to its lack of the ability to remove stale and unnecessary overloads. Take `replace` for instance: it has overloads that take a `string`, a type convertible to `string_view`, a pointer to a string, a pointer to a string and its size, a `string` plus parameters to obtain a substring and a `string_view` plus parameters to obtain a substring. All of these overloads can be replaced with a single overload accepting a `string_view` parameter, as it is cheap to copy, and can be constructed from all of the aforementioned types. Those pesky overloads taking parameters to perform a substring can also be done away with, as the substring operation can be performed at the call site. `json::string` includes a `subview` function which returns a `string_view` for cheap substring operations, so needless `json::string` constructions can be avoided.
-
-
-
-With the guidance of Peter Dimov, I drafted up a new `string_view` based interface, which dramatically reduced the number of overloads for each member function. Once the sign-off was given, it was a relatively trivial task to implement, as it was merely a matter of removing no longer needed overloads, and changing function templates that accepted objects convertible to `string_view` to be non-template functions with a `string_view` parameter.
-
-
-
-
-# Boost.StaticString
-
-
-
-I was originally invited by Vinnie to work on [Boost.StaticString](https://github.com/boostorg/static_string) (then Boost.FixedString) back in October, and since then it has been accepted into Boost. It provides a fixed capacity, dynamically sized string that performs no dynamic allocations. It’s a fast alternative to `std::string` when either the capacity of the string is known or can be reasonably approximated at compile time.
-
-
-
-My primary objective for February was to implement the post-review changes requested by our review manager Joaquin Muñoz in time for the next Boost release. Most of the changes by this point had already been implemented, but still lacked polish, and the test coverage had to be improved. Additionally, I wanted to add a few optimizations and improve the `constexpr` support, the latter proving to be quite a daunting task.
-
-
-
-The nice thing about `static_string` is that the capacity of the string is part of the type, consequently allowing us to make all kinds of assumptions and to statically resolve what optimizations we want to perform at compile time. In particular, the usual checks are done in insert and replace that determine if the source of the operation (i.e. the string that will be copied into the string) lies within the string the operation is performed upon can be elided if we can somehow guarantee that they will never overlap. Since non-potentially overlapping objects of different types are guaranteed to occupy distinct addresses, we can safely skip this check for overloads accepting `static_string` parameters, if the capacity of the strings differ. In my opinion, it’s pretty neat.
-
-
-
-The aforementioned check that is performed in insert and replace also was the source of some serious headaches with respect to `constexpr` support across implementations. When you mix in the requirement to support C++11, it gets even worse. The primary issue that presents itself is that comparing pointers with the built-in relational operators yield unspecified results when the pointers do not point to elements of the same array; evaluating such an expression is forbidden during constant evaluation. The usual workaround to this is to use the library comparison operators (i.e. `std::less`, `std::greater` etc.), and in standardese land, it’s all good and well. However, on actual implementations, this won’t always work. For example, when using clang with libstdc++, [the implementation for the library comparison operators](https://github.com/gcc-mirror/gcc/blob/master/libstdc++-v3/include/bits/stl_function.h#L443) casts the pointers to integer types and compares the resulting integers. If `__builtin_is_constant_evaluated` is available it is used and the check is instead done using the built-in relational operators, but this was only [implemented on clang quite recently](https://reviews.llvm.org/D55500) and therefore cannot be used for the most part.
-
-
-
-Figuring this out and finding a solution was quite a process, but eventually, I settled on a solution that gave the least unspecified results across implementations, provided good performance, and actually compiled. In essence, if `std::is_constant_evaluated` or a builtin equivalent is available, we can use a neat trick involving the equality operator for pointers. Equality comparison for pointers [almost never has unspecified results](http://eel.is/c++draft/expr.eq#3), so to check if the pointer falls within a range, we can iterate over every pointer value within that range and test for equality. This is only done during constant evaluation, so performance does not suffer. If the aforementioned functions cannot be used, or if the function is not evaluated within a constant evaluation, we use the built-in comparison operators for configurations where the library comparison operators don’t work in constant evaluation, and otherwise use the library comparison operators. Having a portable version of this check in the standard library wouldn’t be a terrible idea, so it may be something I will pursue in the future.
-
-
-
-Another feature that took a little bit of time to get working were the `to_static_string` overloads for floating-point types. `static_string` does not enjoy the luxury of being able to increase capacity, so sticking to only using standard notation isn’t possible since floating-point types can represent extremely large values. To get the best of both worlds, we first attempt to use standard form and then retry using scientific notation if that fails. To match `std::to_string`, the default precision of 6 is used for standard form - however, if we resort to scientific notation, we use the highest precision value possible based on the number of digits required to represent all the values of the type being converted, and the number of digits in its maximum mantissa value. As Peter Dimov noted several times, using the `%g` format specifier would be a preferable solution, so I may change this in the future.
-
-
-
-# Standardization
-
-
-
-The Prague meeting came and went (I did not attend), and unfortunately no progress was made on any of my papers. CWG was very busy finalizing C++20, so that left no time for a wording review of [P1839](http://wg21.link/p1839), and I’m holding off on my other papers. I’m planning to attend the New York meeting in November representing The C++ Alliance (my first meeting, I’m excited!), which is where I will be presenting [P1945](http://wg21.link/p1945), [P1997](http://wg21.link/p1997), and a few others that are currently in the works. Outside of proposals, it was business as usual; finding and fixing several editorial issues, and filing a few defect reports. Most of the work I do focuses on fixing the core language, and generally improving the consistency of the wording, so this month I worked on fixing incorrect conventions, removing redundant wording, and a small rewrite of [[dcl.meaning]](http://eel.is/c++draft/dcl.meaning) on the editorial side of things. As for defect reports, they consisted of some small wording issues that weren’t quite editorial but didn’t have a significant impact on the language.
-
-
-
-# Summary
-
-
-
-My experience working at the Alliance has been very positive thus far. Being a student, having flexible hours is fantastic, as I am able to adjust when I work based on my school workload. In the short amount of time I have spent working on Boost.JSON and Boost.StaticString, I have learned a lot, and continue to do so every day. Vinnie, Peter, and Glen always provide their invaluable feedback through reviews and answering questions, which is extremely helpful when working on projects of this scale with little experience. I consider the acceptance of Boost.StaticString into Boost to be my crowning achievement thus far, and I’m excited to see what kinds of cool projects I’ll be working on in the future.
-
-
-
-If you want to get in touch with me, you can message me on the [Cpplang slack](http://slack.cpp.al/), or [shoot me an email](mailto:sdkrystian@gmail.com).
\ No newline at end of file
diff --git a/_posts/2020-03-31-RichardsMarchUpdate.md b/_posts/2020-03-31-RichardsMarchUpdate.md
deleted file mode 100644
index cfef33d1c..000000000
--- a/_posts/2020-03-31-RichardsMarchUpdate.md
+++ /dev/null
@@ -1,311 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's March Update
-author-id: richard
----
-
-# Coding in the time of a Pandemic
-
-It has been an interesting month, there having been the minor distraction of a lockdown of our
-little country. The borders with Spain and France were closed about three weeks ago and
-all residents have been asked to stay at home other than to buy groceries or walk their dogs.
-Fortunately I have dogs so I at least have a legitimate reason to see the sun.
-
-One of the advantages of living in a tiny country is that the government has been able to
-secure the supply of 150,000 COVID-19 testing kits, which represents two tests per resident.
-They are also working on supplying every resident with masks for use when shopping.
-I am hoping to report in my next blog that we are allowed outside subject to a negative
-test and the wearing of a mask and gloves.
-
-Fortunately, until today, our internet has been uninterrupted. Communication with my friends
-and colleagues at the C++ Alliance and the wider developer community has continued.
-
-# Boost Release
-
-The Boost 1.73 release is imminent. Thus much of my focus in the latter half of the month has
-been on addressing any remaining issues in Beast that represent an easy win in terms of
-demonstrating progress between releases.
-
-This brings to a close my first quarter as a maintainer of the Beast library. I would have
-liked to have produced more in terms of feature development and architectural improvements,
-but a few interesting things came up which delayed this; some of which I will share with you
-here.
-
-# (Possibly) Interesting Asio Things
-
-To say that Boost.Beast has a strong dependency on Boost.Asio would be an understatement. It
-should therefore come as no surprise that the Beast team spend a lot of time working with
-Asio and (certainly in my case) a lot of time working to understand the internals.
-
-We had cause to reach out to Chris Kohlhoff, Asio's author, on two occasions in recent
-times. If you read my February blog you would have seen the issues we have faced with the
-`DynamicBuffer` concept. This month it was about the thread-safety of composed operations and
-IO objects.
-
-But first, the result of a question I asked myself:
-
-## Is it possible to write an asynchronous composed operation entirely as a lambda?
-
-In short, if you're using c++14 or better, the answer is happily yes!
-
-Here is the smallest program I could think of:
-
-a: Implemented asynchronously
-
-b: Targeting a POSIX system (just because I happen to know more about POSIX than Windows)
-
-This program simply copies the contents of `stdin` to `stdout`:
-
-```cpp
-int
-main()
-{
- asio::io_context ioc;
- auto exec = ioc.get_executor();
-
- auto in = asio::posix::stream_descriptor(exec, ::dup(STDIN_FILENO));
- auto out = asio::posix::stream_descriptor(exec, ::dup(STDOUT_FILENO));
-
- async_copy_all(in, out, [](auto&& ec, auto total){
- std::cout << "\ntransferred " << total << " bytes\n";
- if (ec.failed())
- {
- std::cerr << "transfer failure: " << ec.message() << std::endl;
- std::exit(ec.value());
- }
- });
-
- ioc.run();
-
- return 0;
-}
-```
-
-People who are unused to writing composed operations (asynchronous operations that fit into
-the ASIO ecosystem), or people who have written them longer ago than last year, might at
-this stage feel their hearts sinking in anticipation of the complex horror show awaiting
-them when writing the function `async_copy_all`.
-
-Fortunately, Asio's new(ish) `async_compose` template function makes this reasonably
-painless:
-
-```cpp
-template<class InStream, class OutStream, class CompletionToken>
-auto
-async_copy_all(
- InStream &fd_in,
- OutStream &fd_out,
- CompletionToken &&completion)
-{
- return asio::async_compose<
- CompletionToken,
- void(system::error_code const &,std::size_t)>(
- [&fd_in, &fd_out,
- coro = asio::coroutine(),
- total = std::size_t(0),
-     store = std::make_unique<char[]>(4096)]
- (auto &self,
- system::error_code ec = {},
- std::size_t bytes_transferred = 0) mutable
- {
- BOOST_ASIO_CORO_REENTER(coro)
- for(;;)
- {
- BOOST_ASIO_CORO_YIELD
- {
- auto buf = asio::buffer(store.get(), 4096);
- fd_in.async_read_some(buf, std::move(self));
- }
- if (ec.failed() || bytes_transferred == 0)
- {
- if (ec == asio::error::eof)
- ec.clear();
- return self.complete(ec, total);
- }
-
- BOOST_ASIO_CORO_YIELD
- {
- auto buf = asio::buffer(store.get(), bytes_transferred);
- fd_out.async_write_some(buf, std::move(self));
- }
- total += bytes_transferred;
- if (ec.failed())
- return self.complete(ec, total);
- }
- },
- completion, fd_in, fd_out);
-}
-```
-
-There are a few things to note in the implementation.
-
-1. The first is that the entire asynchronous operation's implementation state is captured
-in the capture block of the lambda (this is why we need c++14 or higher)
-2. Secondly, the lambda is mutable. This is so we can update the state and then `move` it
-into the completion handler of each internal asynchronous operation.
-3. The second and third arguments of the lambda's function signature are defaulted. This is
-because `async_compose` will cause the implementation (in this case, our lambda) to be called
-once with no arguments (other than `self`) during initiation.
-4. There is an explicit check for `eof` after the yielding call to `fd_in.async_read_some`.
-In Asio, `eof` is one of a few error codes that represents an informational condition
-rather than an actual error. Another is `connection_aborted`, which can occur during
-an `accept` operation on a TCP socket. Failing to check for this error-that-is-not-an-error
-can result in asio-based servers suddenly going quiet for 'no apparent reason'.
-5. Notice that the un-named object created by `async_compose` intercepts every invocation on
-it and transfers control to our lambda by prepending a reference to itself to the argument
-list. The type of `Self` is actually a specialisation of an `asio::detail::composed_op<...>`
-(as at Boost 1.72). However, since this class is in the detail namespace, this should never
-be relied on in any program or library.
-6. Note that I create the buffer object `buf` in separate statements to the initiations of
-the async operations on the streams. This is because the `unique_ptr` called `store` is going
-to be `move`d during the initiating function call. Remember that arguments to function calls
-are evaluated in unknowable order in c++, so accessing `store` in the same statement in
-which the entire completion handler has been `move`d would result in UB.
-7. Finally, `async_compose` is passed both the input and output stream (in addition to their
-references being captured in the lambda) so that both streams' associated executors can be
-informed that there is outstanding work. It may be surprising to some that the input and
-output streams may legally be associated with different executors.
-
-Actually, now that I write this, it occurs to me that it is unclear to me what is the
-'associated executor' of the composed operation we just created. Asio's documentation is
-silent on the subject.
-
-Inspecting the code while single-stepping through a debug build revealed that the executor is
-taken from the first of the `io_objects_or_executors&&...` arguments to `async_compose` which
-itself has an associated executor. If none of them do, then the `system_executor` is chosen as
-the default executor (more on why this may cause surprises and headaches later). Note that as
-always, wrapping the lambda in a call to `bind_executor` will force the composed operation's
-intermediate invocations to happen on the bound executor.
-
-In our case, it is `fd_in` which will be providing the executor and as a result, every
-invocation of our lambda (except the first) is guaranteed to happen by being invoked
-as if by `post(fd_in.get_executor(), (...))`.
-
-## `system_executor` and "What Could Possibly Go Wrong?"
-
-Once upon a time, when I first started using Asio, there were no `executor`s at all. In
-fact, there were no `io_context`s either. There was an `io_service` object. At some point
-(I don't remember the exact version of Asio, but it was at least five years ago) the
-`io_service` was replaced with `io_context`, an object which did basically the same job.
-
-More recently, the `io_context` represents the shared state of a model of the `Executor`
-Named Type Requirement (aka Concept). The state of the art is moving towards passing copies
-of `Executor`s rather than references to `io_context`s.
-
-Asio now contains a concrete type, the `executor` which is a type-erased wrapper which
-may be assigned any class which models an `Executor`.
-
-As you might expect, we are heading into a world where there might be more than one model
-of `Executor`. In anticipation of this, by default, all Asio IO objects are now associated
-with the polymorphic wrapper type `executor` rather than an `io_context::executor_type`.
-
-One such model of `Executor` supplied by Asio is the `system_executor`, which is actually
-chosen as the default associated executor of any completion handler. That is, if you initiate
-an asynchronous operation in Asio today, against a hypothetical io_object that does not have
-an associated executor and you do not bind your handler to an executor of your own, then
-your handler will be invoked as-if by `post(asio::system_executor(), handler)` - that is,
-it will be called on some implementation-defined thread.
-
-Now that the basics are covered, back to _what could possibly go wrong_?
-
-Well imagine a hypothetical home-grown IO Object or _AsyncStream_. Older versions of the Asio
-documentation used to include an example user IO Object, the logging socket.
-
-The basic premise of our logging socket is that it will do everything a socket will do, plus
-log the sending and receiving of data, along with the error codes associated with each read
-or write operation.
-
-Clearly the implementation of this object will contain an asio socket object and some kind of
-logger. The internal state must be touched on every asynchronous operation initiation (to
-actually initiate the underlying operation and record the event) *and* during every
-completion handler invocation, in order to update the logger with the results of the
-asynchronous operation.
-
-As we know, invocations of intermediate completion handlers happen on the executor associated
-with the final completion handler provided by the user, so in our case, the actions will be
-something like this:
-
-```
-on the initiating thread:
- logging_socket::async_write_some
- logging_socket::async_write_some_op::operator()()
- logging_socket::impl::update_logger(...)
- socket::async_write_some(...)
-
-... time passes...
-
-on a thread associated with the associated executor:
- logging_socket::async_write_some_op::operator()(ec, bytes_transferred)
- logging_socket::impl::update_logger()
- user_completion_handler(ec, bytes_transferred)
-```
-
-The situation will be similar for a write operation.
-
-Now consider the following code (`ls` is an object of our hypothetical type `logging_socket`):
-
-```cpp
- ls.async_write_some(
- get_tx_buffer(),
- net::bind_executor(
- net::system_executor(),
- [](auto ec, auto size){
- /* what happens here is not relevant */
- }));
- ls.async_read_some(
- get_rx_buffer(),
- net::bind_executor(
- net::system_executor(),
- [](auto ec, auto size){
- /* what happens here is not relevant */
- }));
-```
-
-What have I done? Not much, simply initiated a read and a write at the same time - a
-perfectly normal state of affairs for a socket. The interesting part is that I have
-bound both asynchronous completion handlers to the `system_executor`. This means that
-each of the handlers will be invoked (without synchronisation) on two arbitrary threads.
-
-Looking at our pseudo-code above, it becomes clear that there will be a race for the
-`logging_socket`'s implementation:
-
-* Between the initiation of the read and the completion of the write, and
-* between the completion of the read and the completion of the write
-
-Again the Asio documentation is silent on the correct method of mitigating this situation.
-Two possible workarounds have occurred to me so far:
-
-1. Never use a `system_executor` unless first wrapping it in a `strand`.
-2. Ensure that all composed operations of IO objects are thread-safe with respect to
- mutation of the implementation. If this is made true, it almost inevitably follows that
- the entire IO Object may as well be made thread-safe (which Asio IO Objects are not).
-
-I have reached out to Chris for final judgement and will update the blog (and possibly much
-of Beast!) in response to a definitive answer.
-
-# Unified Web Client
-
-I have been given the go-ahead to make a start on exploring a unified web-client library
-which will eventually become a candidate for inclusion into Boost.
-
-The obvious course of action, building directly on top of Beast is a no-go. If the
-library is to be used on platforms such as tablets and phones, or appear in the various
-app stores of vendors, there are restrictions on which implementations of communications
-libraries may be used. To cut a long story short, vendors want to minimise the risk of
-security vulnerabilities being introduced by people's home-grown communications and
-encryption code.
-
-So my initial focus will be on establishing an object model that:
-
- * Provides a high degree of utility (make simple things simple).
- * Emulates or captures the subtleties of vendor's Web Client frameworks.
- * Efficiently slots into the Asio asynchronous completion model.
-
-Of course, Linux and proprietary embedded systems do not have mandated communications
-libraries, so there will certainly be heavy use of Beast in the unconstrained platform-
-specific code.
-
-More information as it becomes available.
-
diff --git a/_posts/2020-04-07-KrystiansMarchUpdate.md b/_posts/2020-04-07-KrystiansMarchUpdate.md
deleted file mode 100644
index 0b663ac68..000000000
--- a/_posts/2020-04-07-KrystiansMarchUpdate.md
+++ /dev/null
@@ -1,166 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: krystian
-title: Krystian's March Update
-author-id: krystian
----
-
-
-# The Rundown
-
-Due to the COVID-19 pandemic, my classes have been moved online. It's certainly an interesting way to teach, but admittedly I can't say it's enjoyable or effective. However, it has given me a lot of time to work on various projects, which is a reasonable trade-off (at least in my opinion). I got quite a bit done this month due to the substantial increase in leisure time, and was able to work on several projects that previously didn't fit into my schedule.
-
-# Boost.StaticString
-
-I spent the first few days of March putting the finishing touches on Boost.StaticString in preparation for the release of Boost 1.73.0, mostly consisting of housekeeping tasks, but also some bug fixes for certain compiler configurations. In particular, problems arose with GCC 5 regarding its `constexpr` support, two of which impede using `basic_static_string` during constant evaluation: `throw` expressions, and non-static member functions whose return type is the class they are a member of. With respect to the former, consider the following:
-
-```cpp
-constexpr int throw_not_evaluated(bool flag)
-{
- if (flag)
- throw 1;
- return 0;
-}
-
-constexpr int const_eval = throw_not_evaluated(false);
-```
-[View this on Godbolt](https://godbolt.org/z/CEuEvr)
-
-It is helpful to first establish what the standard has to say regarding the above example. Looking at [[dcl.constexpr] p3](https://timsong-cpp.github.io/cppwp/n4140/dcl.constexpr#3), we see that `throw_not_evaluated` contains no constructs that are explicitly prohibited from appearing within a `constexpr` function in all contexts. Now taking a look at [[expr.const] p2](https://timsong-cpp.github.io/cppwp/n4140/expr.const#2) we see:
-> A *conditional-expression* `e` is a core constant expression unless the evaluation of `e`, following the rules of the abstract machine, would evaluate one of the following expressions:
-> - [...] a *throw-expression*
-
-Boiling down the standardese, this effectively says that `throw_not_evaluated(false)` is a constant expression unless, when evaluated, it would evaluate `throw 1`. This would not occur, meaning that `throw_not_evaluated(false)` is indeed a constant expression, and we can use it to initialize `const_eval`. Clang, later versions of GCC, and MSVC all agree on this and compile it without any complaints. However, GCC 5 with the `-std=c++14` flag fails to do so, citing:
-> error: expression '<**throw-expression**>' is not a constant-expression
-
-Sounds excusable to me! GCC 5.1 was released in 2015 after all, so you can't expect every feature to be implemented less than a year after C++14 was finalized, right? It all sounded sane to me, but before going ahead and disabling `constexpr` for functions that were potentially-throwing, I decided to try a small variation of the original:
-```cpp
-struct magic
-{
- constexpr magic(bool flag)
- {
- if (flag)
- throw 1;
- return;
- }
-
- constexpr operator int() { return 1; }
-};
-
-constexpr int magic_eval = magic(false);
-```
-[View this on Godbolt](https://godbolt.org/z/fXQxDT)
-
-*What?*
-
-It miraculously works. Further, if we construct a `magic` object within a constexpr function:
-```cpp
-constexpr int throw_not_evaluated(bool flag)
-{
- return magic(flag);
-}
-```
-[View this on Godbolt](https://godbolt.org/z/qMAkiQ)
-
-***What?***
-
-Gathering the remnants of my sanity, I lifted the `BOOST_STATIC_STRING_THROW_IF` macros out of all the potentially-throwing functions and replaced them with a class template (for GCC 5) or function template (for all other compilers) that can be constructed/called with the `throw_exception(message)` syntax. I also considered adding a `throw_exception_if` variation to hide the `if` statement within shorter functions, but on advisement from Vinnie Falco and Peter Dimov, I didn't end up doing this to allow for better optimization.
-
-Moving on to a simpler GCC 5 issue (but significantly more annoying to diagnose), I discovered that `substr` was causing an ICE during constant evaluation. It took some time to get to the bottom of it, but I eventually figured out that this was because the return type of `substr` is `basic_static_string`. Unfortunately, the only remedy for this was to disable `constexpr` for the function when GCC 5 is used.
-
-Save for these two issues, the rest of the work that had to be done for StaticString was smooth sailing, mostly improvements to the coverage of the `insert` and `replace` overloads that take input iterators. In the future I plan to do an overhaul of the documentation, but as of now it's ready for release, so I'm excited to finally get this project out into the wild.
-
-# Boost.JSON
-
-Most of the time I spent on Boost.JSON in the past month was spent learning the interface and internals of the library, as I will be writing documentation on many of the components in the future. My primary focus was on the `value_ref` class, which is used as the value type of initializer lists used to represent JSON documents within source code. The reason that this is used instead of `value` is because `std::initializer_list` suffers from one fatal flaw: the underlying array that it refers to is `const`, which means you cannot move its elements. Copying a large JSON document is not trivial, so `value_ref` is used as a proxy referring to an underlying object that can be moved by an operation using an initializer list. This is achieved by storing a pointer to a function that will appropriately copy/move construct a `value` when requested by the target.
-
-While looking at how `value_ref` works, I went ahead and added support for moving from a `json::string`, since up to that point all strings were stored as `string_view` internally, thus precluding the ability to move from a `value_ref` constructed from a `json::string`. There also was a bug caused by how `value_ref` handles construction from rvalues, in part due to the unintuitive nature of type deduction. Consider the following:
-```cpp
-struct value_ref
-{
-    template<typename T>
- value_ref(const T&) { ... }
-
-    template<typename T>
- value_ref(T&&) { ... }
-};
-
-json::value jv;
-const json::value const_jv;
-value_ref rvalue = std::move(jv); // #1
-value_ref const_rvalue = std::move(const_jv); // #2
-```
-In both `#1` and `#2`, the constructor `value_ref(T&&)` is called. This certainly makes sense once you consider the deduction that is performed, however, by glancing at just the declarations themselves, it isn't obvious, as we've all been taught that references to non-const types will not bind to a const object. Where this becomes a problem for `value_ref` is that the constructor taking a `T&&` parameter expects to be able to move from the parameter, so it internally stores a non-const `void*`. Converting a "pointer to `const` `T`" to `void*` isn't permitted, so you get a hard error. The fix for this was fairly trivial.
-
-# Standardization
-
-Most of my time this month was spent working on standardization related activities, which can be broken up into two somewhat separate categories:
-- Fixing existing wording
-- Working on papers
-
-## Editorial issues
-
-I submitted a fair few pull requests to the [draft repository](https://github.com/cplusplus/draft), most of which were general wording cleanups and improvements to the consistency of the wording. With respect to wording consistency, I targeted instances of incorrect cv-qualification notation, definitions of terms within notes, cross-references that name the wrong subclause or are self-referential, and redundant normative wording. These kinds of issues are all over the standard, but I generally stay away from the library wording unless it's absolutely necessary since it has a subtle difference in the wording style compared to that of the core language. I ended up writing a tool to make grepping for these issues a little easier, which is certainly an improvement over manually inspecting each TeX source.
-
-The wording cleanups are the most time consuming, but also are the ones I find the most enjoyable. They all follow the same principle of rephrasing an idea with more accurate wording while not changing the actual meaning -- something that often proves to be a challenge. These generally start off as small issues I notice in the text, but then snowball into complete rewrites to make the whole thing consistent. Anyways, if it sparks your interest you can find the various editorial fixes I worked on [here](https://github.com/cplusplus/draft/pulls?q=is%3Apr+author%3Asdkrystian).
-
-## P1997
-
-Of my papers, [P1997](http://wg21.link/p1997) is the one that I'm putting the most time into. In short, it proposes to make arrays more "regular", allowing assignment, initialization from other arrays, array placeholder types, and many more features we bestow upon scalar and class types. The holy grail of changes (not proposed in the paper) would be to allow the passing of arrays by value *without* decaying to a pointer: fully cementing arrays as first-class types. Unlike the other proposed changes this isn't a pure extension, so to make it remotely feasible existing declarations of parameters with array type (for the sake of brevity we will abbreviate them as PAT) would have to be deprecated in C++23, removed in C++26, and then reinstated in C++29. For this reason, we are undertaking this as an entirely different paper to allow for the pure extensions to be added in C++23, leaving the removal and reinstatement on the backburner.
-
-In order to bring convincing evidence that:
-
- 1. The current semantics for PAT are unintuitive and merely syntactic sugar.
- 2. The deprecation of PAT would not be significantly disruptive to existing codebases.
-
-Ted (the co-author) and I decided to see exactly how often PAT appear within normal C++ code. While [codesearch.isocpp.org](https://codesearch.isocpp.org/) by Andrew Tomazos is a fantastic tool, the search we wanted to conduct was simply not possible with his tool, so we set out to create our own. We needed two things:
-
- 1. A dataset
- 2. Some tool to parse all the source files
-
-For the dataset, I wrote a tool to clone the top 4000 C++ Github repositories, and clean out all files that weren't C++ source files (.cpp, .cxx, .cc, .h, .hpp, .hxx, .ipp, .tpp). Github has a nice API to search for repositories, but it unfortunately limits the results for a single search query to 1000 results, but since I was sorting them by star count, I was able to use it as an index to begin a new search query. After accidentally sending my Github credentials to Ted once and waiting 10 hours for all the repositories to be cloned, we had our dataset: 2.7 million source files, totaling around 32GB.
-
-To parse all these files, we opted to use the Clang frontend. Its AST matcher was perfectly suited to the task, so it was just a matter of forming the correct matchers for the three contexts a PAT can appear in (function parameter, non-type template parameter, and `catch` clause parameter). All the files were parsed in single file mode, since opening and expanding `#include` directives would make the processing of the files take exponentially longer. Forming the correct match pattern proved to be the most difficult part, as the syntax is not entirely intuitive and often times they simply didn't find every PAT in our test cases.
-
-Doing a single file parse along with wanting to find every PAT possible *and* suppressing all diagnostics landed us in the land of gotchas. To start, the `arrayType` matcher only would match declarations that had an array declarator present, i.e. `int a[]` would be found but `T a` where `T` names an array type would not. Eventually I found `decayedType`, which did exactly what we wanted, so long as we filtered out every result that was a function pointer. This worked for function parameters and non-type template parameters, but not `catch` clause parameters. In the Clang AST, `catch` is categorized as a statement that encloses a variable declaration whose type is not considered to be decayed (as far as I could see) so we could only match parameters that used an array declarator. I don't expect anyone to actually declare PATs in a `catch` clause, and after running the tool on the dataset exactly zero instances were found, so this is most likely a non-issue.
-
-Single file parsing introduced a number of issues all stemming from the fact that none of the `#include` directives were processed, meaning that there was a large number of types that were unresolved. Consider the following:
-```cpp
-#include
-
-using array = unresolved[2];
-using array_ref = unk_array&;
-array_ref ref = {unresolved(), unresolved()};
-
-void f(decltype(ref) param);
-```
-For reasons that I don't know, since `unresolved` has no visible declaration Clang reports that `param` has a decayed type. I suspect this is because diagnostics are suppressed and some recursive procedure used in the determination of the type named by `array_ref` returns upon encountering an error at the declaration of `array` and simply returns `unresolved[2]` as the type. If you know why this happens, don't hesitate to ping me! I ended up tracking the number of PAT declarations that use an array declarator separately since I suspect that this number may end up being more accurate.
-
-Once the tool was ready to go and we started to run it on the dataset, we encountered issues of the worst kind: assertion failures. I suppose such errors could be expected when abusing a compiler to the extent that we did, but they weren't particularly enjoyable to fix. I should mention that tool itself is meant to be run on a directory, so once an assertion failed, it would end the entire run on that directory. My initial solution to this was changing the problematic asserts to throw an exception, but the number of failures was ever-growing. Creating a new `CompilerInstance` for each file did somewhat remedy the situation, but didn't fix it all. Eventually, we called it good enough and let it run over the entire dataset. Clang itself was in our dataset, with a nasty little surprise taking the form of infinitely recursive template instantiations and debug pragmas that would crash the compiler. Clang relies on the diagnostics to signal when the recursive instantiation limit is reached, but since those were disabled the thread would never terminate. Evil.
-
-Once the paper is finished, I'll report the results in a future post.
-
-## Implicit destruction
-
-This paper intends to substantially overhaul the wording that describes the interaction between objects and their storage. I originally brought this issue up several months back with [this pull request](https://github.com/cplusplus/draft/pull/2872) in an attempt to fix it editorially, but it was deemed too large in scope. I finally got started on the paper and drafted up a direction to take, which will hopefully resolve the shortfalls of our current wording.
-
-This problem stems from the notion that objects don't exist before their lifetime has started and after it has ended. This allows compilers to make a lot of assumptions and consequently leads to more optimized code, but the manner in which the wording was applied has severely crippled our ability to refer to "not-objects". We want to be able to place restrictions on storage that an object used to occupy, but simply have no way for doing so. Thus, the direction I plan to take is to define the semantics of storage, allowing us to place restrictions on that storage even if no object exists within it. I don't have too many of the core definitions completed yet as they require the most time to make them robust, but once that is hashed out, applying it where needed should be smooth sailing.
-
-Here is a short list of the main changes:
-- Define what a *region of storage* is, specify when it is acquired, released, and what kinds of objects may occupy it.
-- Remove the storage duration property from objects, and effectively make that agnostic of the storage they occupy. Instead, associate storage duration with a variable. Dynamic storage duration can be removed since such storage isn't associated with a variable.
-- Specify when *reuse* of storage occurs, and its effects upon the objects within that storage.
-- Properly specify when pointers and expressions refer to storage while preserving the notion that they refer to objects (or functions).
-- Specify that when control passes through the definition of a variable, storage is acquired, objects are created, and initialization is performed. Likewise, specify that exit from a scope, program, or thread causes objects within the storage to be destroyed (if any), and the storage to be released.
-
-It's a big undertaking, but I'm excited to work on this and see what kind of feedback I get. However, this paper will be more of a long term project, since it will be touching the majority of the wording in the standard. I'll provide updates in future posts.
-
-## Placement new during constant evaluation
-
-A paper that I've been thinking about working on and finally got around to revolves around a new (heh) C++20 feature: `new` expressions are now able to be evaluated during constant evaluation (CE). While the storage they acquire must be released during the CE and it may only call the replaceable global allocation functions, it finally allows for the use of dynamically sized containers within a constant expression.
-
-This is great! However, there is a restriction here that is completely avoidable. The function `std::construct_at` was introduced to allow for objects to be constructed as if by placement `new` -- nice, but we don't allow placement `new` to be used by itself. This is because a certain implementation can't resolve what object a `void*` points to during CE (thank you Tim Song for the info); and because CE is intended to always yield the same results on all implementations, `construct_at` is used to ensure the pointer type passed is always a pointer to object type. I think that *at the very least*, congruent placement `new` expressions should be allowed by the principle of this being unnecessarily restrictive. As with all the other papers, I'll post progress updates in a future post. I've drafted up some wording, and I plan to have this ready sometime around June.
-
-# Information
-If you want to get in touch with me, you can message me on the [Cpplang slack](http://slack.cpp.al/), or [shoot me an email](mailto:sdkrystian@gmail.com).
diff --git a/_posts/2020-04-28-New-Boost-Release.md b/_posts/2020-04-28-New-Boost-Release.md
deleted file mode 100644
index 1aed4b54f..000000000
--- a/_posts/2020-04-28-New-Boost-Release.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
-title: New Boost Release
----
-
-# Boost 1.73
-
-## New Library
-StaticString:
-A dynamically resizable string of characters with compile-time fixed capacity and contiguous embedded storage, from Krystian Stasiowski and Vinnie Falco.
-
-Boost Release Notes
diff --git a/_posts/2020-04-30-RichardsAprilUpdate.md b/_posts/2020-04-30-RichardsAprilUpdate.md
deleted file mode 100644
index bfaf46412..000000000
--- a/_posts/2020-04-30-RichardsAprilUpdate.md
+++ /dev/null
@@ -1,454 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's April Update
-author-id: richard
----
-
-# Boost 1.73 Released and other Matters
-
-The 1.73.0 release of Boost took up more attention than I had anticipated, but in the end all seemed to go well.
-
-Since then I've been working through the issues list on GitHub and am now starting to make some headway.
-
-I came across a few other interesting (to me) topics this month.
-
-# (Possibly) Interesting Asio Things
-
-Last month I asked the question, "Is it possible to write an asynchronous composed operation entirely as a lambda?".
-
-This month I went a little further with two items that interested me.
-
-The first is whether asio's `async_compose` can be adapted so that we can implement a complex composed operation involving
-more than one IO object easily using the asio faux `coroutine` mechanism.
-
-The second was whether it was possible to easily implement an async future in Asio.
-
-## Async Asio Future
-
-Here is my motivating use case:
-
-```cpp
- auto p = async::promise();
- auto f = p.get_future();
-
- // long-running process starts which will yield a string
- start_something(std::move(p));
-
- // wait on the future
- f.async_wait([](some_result_type x) {
- // use the x
- });
-
- // or
- auto str = co_await f.async_wait(net::use_awaitable);
-
- // or shorthand
- auto str = co_await f();
-
-```
-
-The salient points here are:
-* no matter on which thread the promise is fulfilled, the future will complete on the associated executor of the handler
- passed to `async_wait`
-* Ideally the promise/future should not make use of mutexes un-necessarily.
-* (problematic for ASIO) It must work with objects that are not default-constructable.
-
-In the end, I didn't achieve the second goal as this was not a priority project, but I would be interested to see
-if anyone can improve on the design.
-
-The source code is [here](https://github.com/madmongo1/webclient/blob/develop/include/boost/webclient/async/future.hpp)
-
-I tried a couple of ways around the non-default-constructable requirement. My first was to require the CompletionToken
-to the async_wait initiating function to be compatible with:
-
-```cpp
-void (error_code, std::optional)
-```
-
-But I felt this was unwieldy.
-
-Then I remembered Boost.Outcome. I have been looking for a use for this library for some time.
-It turns out that you can legally write an ASIO composed operation whose handler takes a single
-argument of any type, and this will translate cleanly when used with `net::use_future`, `net::use_awaitable` etc.
-
-A default Boost.Outcome object almost fits the bill, except that its exception_ptr type is boost rather than standard.
-
-This is easily solved with a typedef:
-```cpp
-template using myoutcome = boost::outcome2::basic_outcome;
-```
-
-I was feeling pleased with myself for figuring this out, until I came to test the code under C++11... and realised
-that Boost.Outcome is only compatible with C++14 or higher.
-
-So in the end, I cobbled together a 'good enough' version of outcome using a variant:
-
-```cpp
-template < class T >
-struct outcome
-{
- outcome(T arg) : var_(std::move(arg)) {}
- outcome(error_code const& arg) : var_(arg) {}
- outcome(std::exception_ptr const& arg) : var_(arg) {}
-
- auto has_value() const -> bool { return polyfill::holds_alternative< T >(var_); }
- auto has_error() const -> bool { return polyfill::holds_alternative< error_code >(var_); }
- auto has_exception() const -> bool { return polyfill::holds_alternative< std::exception_ptr >(var_); }
-
- auto value() & -> T &;
- auto value() && -> T &&;
- auto value() const & -> T const &;
-
- auto error() const -> error_code const &;
-
- using variant_type = polyfill::variant< T, error_code, std::exception_ptr >;
- variant_type var_;
-};
-```
-
-The code for this is [here](https://github.com/madmongo1/webclient/blob/develop/include/boost/webclient/polyfill/outcome.hpp)
-
-Finally this allowed me to express intent at the call site like so:
-
-```cpp
- auto f = p.get_future();
-
- f.async_wait([](outcome os){
- if (os.has_value())
- // use the value
- else if (os.has_error())
- // use the error
- else
- // deal with the exception
- });
-```
-
-The coroutine interface can be made cleaner:
-
-```cpp
- try {
- auto str = co_await f();
- // use the string
- }
- catch(system_error& ec) {
- // use the error code in ec.code()
- }
- catch(...) {
- // probably catastrophic
- }
-```
-
-For the above code to compile we'd have to add the following trivial transform:
-
-```cpp
- template < class T >
- auto future< T >::operator()() -> net::awaitable< T >
- {
- auto r = co_await async_wait(net::use_awaitable);
- if (r.has_value())
- co_return std::move(r).assume_value();
- else if (r.has_error())
- throw system_error(r.assume_error());
- else
- throw r.exception();
- }
-```
-
-
-## Easy Complex Coroutines with async_compose
-
-When your composed operation's intermediate completion handlers are invoked,
-the underlying `detail::composed_op` provides a mutable reference to itself. A typical completion handler looks like
-this:
-
-```cpp
- template
- void operator()(Self& self, error_code ec = {} , std::size_t bytes_transferred = 0)
- {
- reenter(this) {
- // yields and operations on Self
- yield async_write(sock, buf, std::move(self)); // note that self is moved
- }
- }
-```
-
-What I wanted was a composed operation where the following is legal:
-
-```cpp
- template
- void operator()(Self self /* note copy */, error_code ec = {} , std::size_t bytes_transferred = 0)
- {
- reenter(this) {
- // yields and operations on Self
- yield
- {
- async_write(sock, buf, self);
- timer.async_wait(self);
- writing = true;
- sending = true;
- }
-
- while(writing || sending)
- yield
- // something needs to happen here to reset the flags and handle errors and cancellation.
- ;
- }
- }
-```
-
-Which I think looks reasonably clear and easy to follow.
-
-In this work I had to overcome two problems - writing the framework to allow it, and thinking of a maintainable way to
-express intent in the interrelationships between the asynchronous operations on the timer and the socket.
-
-Solving the copyable composed_op problem was easy. I did what I always do in situations like this. I cheated.
-
-`asio::async_compose` produces a specialisation of a `detail::composed_op<>` template. Substituting a disregard of the
-rules for knowledge and skill, I simply reached into the guts of asio and produced a copyable wrapper to this class.
-I also cut/pasted some ancillary free functions in order to make asio work nicely with my new class:
-
-Here's the code... it's not pretty:
-
-```cpp
-template < class Impl, class Work, class Handler, class Signature >
-struct shared_composed_op
-{
- using composed_op_type = boost::asio::detail::composed_op< Impl, Work, Handler, Signature >;
-
- using allocator_type = typename net::associated_allocator< composed_op_type >::type;
- using executor_type = typename net::associated_executor< composed_op_type >::type;
-
- shared_composed_op(composed_op_type &&op)
- : impl_(std::make_shared< composed_op_type >(std::move(op)))
- {
- }
-
- shared_composed_op(std::shared_ptr< composed_op_type > op)
- : impl_(std::move(op))
- {
- }
-
- void initial_resume() { impl_->impl_(*this); }
-
- template < class... Args >
- void operator()(Args &&... args)
- {
- if (impl_->invocations_ < ~unsigned(0))
- {
- ++impl_->invocations_;
- impl_->impl_(*this, std::forward< Args >(args)...);
- }
- }
-
- template < class... Args >
- void complete(Args &&... args)
- {
- impl_->complete(std::forward< Args >(args)...);
- }
-
- auto get_allocator() const -> allocator_type { return impl_->get_allocator(); }
- auto get_executor() const -> executor_type { return impl_->get_executor(); }
-
- std::shared_ptr< composed_op_type > impl_;
-};
-
-template < class Impl, class Work, class Handler, class Signature >
-auto share(boost::asio::detail::composed_op< Impl, Work, Handler, Signature > &composed_op)
- -> shared_composed_op< Impl, Work, Handler, Signature >
-{
- auto op = shared_composed_op< Impl, Work, Handler, Signature >(std::move(composed_op));
- op.initial_resume();
- return op;
-}
-
-template < class Impl, class Work, class Handler, class Signature >
-auto share(shared_composed_op< Impl, Work, Handler, Signature > shared_thing)
- -> shared_composed_op< Impl, Work, Handler, Signature >
-{
- return shared_thing;
-}
-
-template < typename Impl, typename Work, typename Handler, typename Signature >
-inline void *asio_handler_allocate(std::size_t size, shared_composed_op< Impl, Work, Handler, Signature > *this_handler)
-{
- return boost_asio_handler_alloc_helpers::allocate(size, this_handler->impl_->handler_);
-}
-
-template < typename Impl, typename Work, typename Handler, typename Signature >
-inline void asio_handler_deallocate(void * pointer,
- std::size_t size,
- shared_composed_op< Impl, Work, Handler, Signature > *this_handler)
-{
- boost_asio_handler_alloc_helpers::deallocate(pointer, size, this_handler->impl_->handler_);
-}
-
-template < typename Impl, typename Work, typename Handler, typename Signature >
-inline bool asio_handler_is_continuation(shared_composed_op< Impl, Work, Handler, Signature > *this_handler)
-{
- return asio_handler_is_continuation(this_handler->impl_.get());
-}
-
-template < typename Function, typename Impl, typename Work, typename Handler, typename Signature >
-inline void asio_handler_invoke(Function &function, shared_composed_op< Impl, Work, Handler, Signature > *this_handler)
-{
- boost_asio_handler_invoke_helpers::invoke(function, this_handler->impl_->handler_);
-}
-
-template < typename Function, typename Impl, typename Work, typename Handler, typename Signature >
-inline void asio_handler_invoke(const Function & function,
- shared_composed_op< Impl, Work, Handler, Signature > *this_handler)
-{
- boost_asio_handler_invoke_helpers::invoke(function, this_handler->impl_->handler_);
-}
-
-```
-
-With that in hand, and with a little more _jiggery pokery_, I was able to express intent thus:
-
-```cpp
- template < class Self >
- void operator()(Self &self, error_code ec = {}, std::size_t bytes_transferred = 0)
- {
-...
- auto &state = *state_;
-
- reenter(this)
- {
- ...
-
- // here's the interesting bit - self becomes a copyable handle to itself
- yield share(self);
-
- // deduce the port
- yield
- {
- this->initiate_resolve(share(self), state.uri.hostname(), deduce_http_service(state.uri));
- this->initiate_timout(share(self), state.session_.resolve_timeout());
- }
-
- while (this->resolving() || this->timeout_outstanding())
- yield;
-
- if (this->error)
- goto finish;
-
- // connect the socket
-
- state.current_resolve_result = this->resolved_endpoints().begin();
- while (state.current_resolve_result != this->resolved_endpoints().end())
- {
- state.tcp_stream().expires_after(state.session_.connect_timeout());
- yield state.tcp_stream().async_connect(state.current_resolve_result->endpoint(), share(self));
- log("Connect to: ", state.current_resolve_result->endpoint(), " result: ", ec);
- // if the connect is successful, we can exit the loop early.
- if (!ec)
- goto connected;
- ++state.current_resolve_result;
- }
- // if we leave the loop, make sure there is an error of some kind
- this->set_error(ec);
- goto finish;
-
- connected:
-
- ...
-```
-
-The full code can be seen [here](https://github.com/madmongo1/webclient/blob/develop/include/boost/webclient/asio/get_op.hpp)
-
-There are a couple of interesting things to note:
-
-If you start two or more async operations that will complete on the same object, they must all be allowed to complete.
-This is why we yield and wait for both the socket and the timeout:
-
-```cpp
- while (this->resolving() || this->timeout_outstanding())
- yield;
-```
-
-This leads directly to the problem of managing the error_code. Two error_codes will be produced - one for the timer
-(which we hope to cancel before it times out) and one for the resolve operation.
-This means we have to store the first relevant error code somewhere:
-
-```cpp
-/// @brief a mixin to manage overall operation error state
-struct has_error_code
-{
- auto set_error(error_code const &ec) -> error_code &
- {
- if (!error)
- {
- if (ec && ec != net::error::operation_aborted)
- error = ec;
- }
- return error;
- }
-
- error_code error;
-};
-```
-
-And we need a means of allowing communication between the timeout timer and the resolver:
-
-```cpp
- template < class Self >
- void initiate_resolve(Self self, std::string const &host, std::string const &service)
- {
- results_.reset();
- resolver_.async_resolve(host, service, std::move(self));
- }
-
- template < class Self >
- void operator()(Self &self, error_code ec, resolver_type::results_type results)
- {
- results_.emplace(std::move(results));
-
- auto &this_ = *static_cast< Derived * >(this);
- this_.on_resolved(ec);
-
- auto &has_err = static_cast< has_error_code & >(this_);
- this_(self, has_err.set_error(ec));
- }
-
-```
-
-One cancels the other....
-
-```cpp
- void on_timeout()
- {
- this->cancel_resolver();
- log("Timeout");
- }
-
- void on_resolved(error_code const &ec)
- {
- this->cancel_timeout();
- log("Resolve complete: ", ec);
- }
-```
-
-```cpp
- auto resolving() const -> bool { return !results_.has_value(); }
-
- auto cancel_resolver() -> void { resolver_.cancel(); }
-```
-
-In the end I was unsure how much is gained, other than pretty code (which does have value in itself).
-
-# Unified WebClient
-
-Exploratory work started on the unified web client. After some discussion, Vinnie and I agreed on the following design
-decisions:
-
-* Interface to model closely the very popular Python Requests module.
-* Sync and Async modes available.
-* Homogenous (mostly non-template) interface, behind which system-specific implementations can reside.
-* Where native library support is available, that will be used,
-* Where not, internally the library will be implemented in Asio/Beast.
-* Coroutine friendly.
-
-Once more progress has been made on the Boost.Beast issue tracker, I will be focusing attention here.
-
diff --git a/_posts/2020-05-08-KrystiansAprilUpdate.md b/_posts/2020-05-08-KrystiansAprilUpdate.md
deleted file mode 100644
index 3592e0020..000000000
--- a/_posts/2020-05-08-KrystiansAprilUpdate.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: krystian
-title: Krystian's April Update
-author-id: krystian
----
-
-# Overview
-
-Boost 1.73.0 has been released! Save for some minor documentation issues, Boost.StaticString enjoyed a bug-free release, so most of this month was spent working on Boost.JSON getting it ready for review. Unfortunately, I could not spend too much time working due to school and final exams, but now that those have passed I'll be able to put in significantly more time working on projects such as Boost.JSON.
-
-# Boost.JSON
-
-A good portion of my work on Boost.JSON was spent updating the documentation to reflect the replacement of the `storage` allocator model with `boost::container::pmr::memory_resource` (or `std::pmr::memory_resource` in standalone). The old model wasn't necessarily bad, but using `memory_resource` permits the use of existing allocators found in Boost.Container/the standard library, eliminating the need for writing proprietary allocators that only work with Boost.JSON.
-
-Even though `storage` will be going away, `storage_ptr` will remain to support shared ownership of a `memory_resource` -- something that `polymorphic_allocator` lacks. As with `polymorphic_allocator`, `storage_ptr` will still support non-owning reference semantics in contexts where the lifetime of a `memory_resource` is bound to a scope, giving users more flexibility.
-
-I also worked on `monotonic_resource`, the `memory_resource` counterpart to `pool`. This allocator has one goal: to be *fast*. I ended up adding the following features to facilitate this (mostly from `monotonic_buffer_resource`):
-
-- Construction from an initial buffer,
-- The ability to reset the allocator without releasing memory, and
-- The ability to set a limit on the number of bytes that can be dynamically allocated.
-
-The implementations of these features are pretty trivial, but they provide significant opportunities to cut down on dynamic allocations. For example, when parsing a large number of JSON documents, a single `monotonic_resource` can be used and reset in between the parsing of each document without releasing any dynamically allocated storage. While care should be taken to destroy objects that occupy the storage before the allocator is reset, this can substantially reduce the number of allocations required and thus result in non-trivial performance gains.
-
-The other major thing I worked on was fixing an overload resolution bug on clang-cl involving `json::value`. This was originally brought to my attention by Vinnie when the CI build for clang-cl started reporting that overload resolution for `value({false, 1, "2"})` was ambiguous. After a few hours of investigating, I found that `false` was being treated as a null pointer constant -- something that was certainly annoying, but it also didn't fully explain why this error was happening.
-
-After this unfortunate discovery, I tried again with `value({0, 1, "2"})`, this time on clang, and it turns out this was a problem here as well. After *many* hours of testing, I found that the constructor in `storage_ptr` taking a parameter of type `memory_resource` had a small problem: its constraint was missing `::type` after the `enable_if`, allowing `storage_ptr` to be constructed from any pointer type, including `const char*`. This somewhat helped to alleviate the problem, but `value({false, false, false})` was still failing. After many more hours of grokking the standard and trying to reproduce the error, I finally came upon the following `json::string` constructors:
-
-```
-string(string const& other, std::size_t pos, std::size_t count = npos, storage_ptr sp = {})
-
-string(string_view other, std::size_t pos, std::size_t count = npos, storage_ptr sp = {})
-```
-
-See the problem here? Since the first parameter of both constructors can be constructed from null pointer constants, overload resolution for `string(0, 0, 0)` would be ambiguous. However, this isn't the full story. Consider the following constructors for `value`:
-
-```
-value(std::initializer_list init)
-
-value(string str)
-```
-
-For the initialization of `value({0, 0, 0})` the implicit conversion sequence to `str` would be ambiguous, but the one to `value_ref` can be formed. There is a special rule for overload resolution (separate from two-stage overload resolution during list-initialization) that considers any list-initialization sequence that converts to `std::initializer_list` to be a better conversion sequence than one that does not, with the exception to this rule being that it only applies when the two conversion sequences are otherwise identical.
-
-This rule *should* apply here, however, I found that clang has a small bug that prevents this rule from going into effect if any of the candidates have an ambiguous conversion sequence for the same parameter. We solve this pretty trivially by removing some of the redundant constructor overloads in `json::string` and all was well. It was a fun little puzzle to solve (the explanation was a bit of an oversimplification; if you have questions please let me know).
-
-If you want to get in touch with me, you can message me on the [Cpplang slack](http://slack.cpp.al/), or [shoot me an email](mailto:sdkrystian@gmail.com).
\ No newline at end of file
diff --git a/_posts/2020-06-04-WebsitePreviews.md b/_posts/2020-06-04-WebsitePreviews.md
deleted file mode 100644
index 076c92bd8..000000000
--- a/_posts/2020-06-04-WebsitePreviews.md
+++ /dev/null
@@ -1,334 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: sam
-title: Automated Documentation Previews
-author-id: sam
----
-# Overview
-
-Greetings, and welcome to my first blog post at The C++ Alliance.
-
-I've recently begun working on an interesting project for the Alliance which might also have more widespread applicability. The same requirement could possibly apply to your organization as well.
-
-Consider an open-source project that has multiple contributors who are submitting changes via pull-requests in Github. You'd like to have assurances that a pull-request passes all tests before being merged. That is done with continuous integration solutions such as Travis or Circle-CI, which are quite popular and well-known. Similarly, if the submission is *documentation*, you would like to be able to view the formatted output in its final published format so you can review the layout, the colors, and so on. What would be the best way to build and publish documentation from pull requests?
-
-Perhaps the first thought would be to include the functionality in Travis or Circle-CI. And that is certainly possible. However, in some cases there may be sensitive passwords, ssh keys, or other tokens in the configuration. Is it safe to allow random pull requests, from conceivably anyone on the whole internet, to trigger a Circle-CI build that contains authentication information? Let's explore that question, and then present a possible alternative that should be more secure.
-
-# Security
-
-In Circle-CI, you can choose to enable or disable jobs for Pull Requests. It's clearly safer to leave them disabled, but if the goal is to run automatic tests, this feature must be turned on. Next, you may choose to enable or disable access to sensitive keys for Pull Requests. This sounds like a great feature that will allow the jobs to be run safely. You could build Pull Requests with limited authorization. But what if you'd like to include secret keys in the build, that are needed to publish the documentation to an external server which is going to host the resulting content. After building the docs, they must be transferred to wherever they will be hosted. That means you must either include the secret keys in plain text, or toggle the setting to enable sensitive keys in Circle-CI.
-
-Let's briefly think about the latter option. If secret keys are enabled in Circle-CI, they are not outright published or visible to the end-user. The build system obfuscates them. The obfuscation is a good first step. Unfortunately, there's a file called .circleci/config.yml in the project, which contains all the commands to be run by the build system. A pull request could modify that file so that it prints the secrets in clear text.
-
-What can be done?
-
-The answer - which is not overly difficult if you already have some experience - is to run an in-house build server such as Jenkins. This adds multiple layers of security:
-
-- Optionally, does *not* publicly print the build output.
-- Optionally, does *not* run based on a .circleci file or Jenkinsfile, so modifying the configuration file is not an avenue for external attacks.
-- For each build job, it will only include the minimal number of secret keys required for the current task, and nothing more.
-
-While the new system may not be impregnable, it's a major improvement compared to the security issues with Circle-CI for this specific requirement.
-
-# Design
-
-Here is a high level overview of how the system operates, before getting into further details.
-
-A jenkins server is installed.
-
-It builds the documentation jobs, and then copies the resulting files to AWS S3.
-
-The job posts a message in the GitHub pull request conversation with a hyperlink to the new docs.
-
-Each pull request will get its own separate "website". There could be hundreds of versions being simultaneously hosted.
-
-An nginx proxy server which sits in front of S3 serves the documents with a consistent URL format, and allows multiple repositories to share the same S3 bucket.
-
-The resulting functionality can be seen in action. On this pull request [https://github.com/boostorg/beast/pull/1973](https://github.com/boostorg/beast/pull/1973) a message appears:
-
-| An automated preview of the documentation is available at [http://1973.beastdocs.prtest.cppalliance.org/libs/beast/doc/html/index.html](http://1973.beastdocs.prtest.cppalliance.org/libs/beast/doc/html/index.html) |
-
-The link takes you to the preview, and will be updated with each new commit to the pull request.
-
-# More Details
-
-The Jenkins server polls each configured repository at a 5 minute interval, to see if a new pull request has been added. Alternatively, instead of polling, you may add a webhook in Github.
-
-Each repository corresponds to a separate Jenkins "project" on the server. A job checks out a copy of the submitted code, runs the specific steps necessary for that codebase, and uploads the resulting website to an AWS S3 bucket.
-
-The configuration leverages a few Jenkins plugins:
-- "GitHub Pull Request Builder" to launch jobs based on the existence of a new pull request.
-- "S3 Publisher Plugin" for copying files to S3.
-- "CloudBees Docker Custom Build Environment Plugin" to run the build inside an isolated docker container.
-
-One previews bucket is created in S3 such as s3://example-previews
-
-The file path in the S3 bucket is formatted to be "repository"/"PR #". For example, the filepath of pull request #60 for the repo called "website" is s3://example-previews/website/60
-
-The web URL is generated by inverting this path, so "website/60" becomes "60.website". The full URL has the format "60.website.prtest.example.com". This translation is accomplished with an nginx reverse proxy, hosted on the same Jenkins server.
-
-nginx rule:
-rewrite ^(.*)$ $backendserver/$repo/$pullrequest$1 break;
-
-A wildcard DNS entry sends the preview visitors to nginx:
-*.prtest.example.com -> jenkins.example.com
-
-# Implementation
-
-In this section, we will go over all the steps in detail, as a tutorial.
-
-In the following code sections,
-Replace "example.com" with your domain.
-Replace "website" with your repository name.
-Replace "example-previews" with your S3 bucket name.
-
-### General Server Setup
-
-Install Jenkins - https://www.jenkins.io/doc/book/installing/
-
-Install SSL certificate for Jenkins (jenkins.example.com):
-```
-apt install certbot
-certbot certonly
-```
-
-Install nginx.
-
-```
-apt install nginx
-```
-
-Create a website, as follows:
-```
-server {
- listen 80;
- listen [::]:80;
- server_name jenkins.example.com;
- location '/.well-known/acme-challenge' {
- default_type "text/plain";
- root /var/www/letsencrypt;
- }
- location / {
- return 301 https://jenkins.example.com:8443$request_uri;
- }
-}
-
-server {
-listen 8443 ssl default_server;
-listen [::]:8443 ssl default_server;
-ssl_certificate /etc/letsencrypt/live/jenkins.example.com/fullchain.pem;
-ssl_certificate_key /etc/letsencrypt/live/jenkins.example.com/privkey.pem;
-#include snippets/snakeoil.conf;
-location / {
-include /etc/nginx/proxy_params;
-proxy_pass http://localhost:8080;
-proxy_read_timeout 90s;
-}
-}
-```
-
-Set the URL inside of Jenkins->Manage Jenkins->Configure System to be https://_url_ , replacing _url_ with the hostname such as jenkins.example.com.
-
-Install the plugin "GitHub pull requests builder"
-Go to ``Manage Jenkins`` -> ``Configure System`` -> ``GitHub pull requests builder`` section.
-
-Click "Create API Token". Log into github.
-
-Update "Commit Status Build Triggered", "Commit Status Build Start" to --none--
-Create all three types of "Commit Status Build Result" with --none--
-
-On the server:
-
-```
-apt install git build-essential
-```
-
-Install the plugin "CloudBees Docker Custom Build Environment"
-
-add Jenkins to docker group
-
-```
-usermod -a -G docker jenkins
-```
-
-Restart jenkins.
-
-```
-systemctl restart jenkins
-```
-
-Install the "S3 publisher plugin"
-
-In Manage Jenkins->Configure System, go to S3 Profiles, create profile. Assuming the IAM user in AWS is called "example-bot", then create example-bot-profile with the AWS creds. The necessary IAM permissions are covered a bit further down in this document.
-
-Install the "Post Build Task plugin"
-
-### Nginx Setup
-
-Create a wildcard DNS entry at your DNS hosting provider:
-*.prtest.website.example.com CNAME to jenkins.example.com
-
-Create an nginx site for previews:
-
-```
-server {
- # Listen on port 80 for all IPs associated with your machine
- listen 80 default_server;
-
- # Catch all other server names
- server_name _;
-
- if ($host ~* ([0-9]+)\.(.*?)\.(.*)) {
- set $pullrequest $1;
- set $repo $2;
- }
-
- location / {
- set $backendserver 'http://example-previews.s3-website-us-east-1.amazonaws.com';
-
- #CUSTOMIZATIONS
- if ($repo = "example" ) {
- rewrite ^(.*)/something$ $1/something.html ;
- }
-
- #FINAL REWRITE
- rewrite ^(.*)$ $backendserver/$repo/$pullrequest$1 break;
-
- # The rewritten request is passed to S3
- proxy_pass http://example-previews.s3-website-us-east-1.amazonaws.com;
- #proxy_pass $backendserver;
- include /etc/nginx/proxy_params;
- proxy_redirect /$repo/$pullrequest / ;
- }
-}
-
-```
-
-### AWS Setup
-
-Turn on static web hosting on the bucket.
-Endpoint is http://example-previews.s3-website-us-east-1.amazonaws.com
-
-Add bucket policy
-
-```
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "PublicReadGetObject",
- "Effect": "Allow",
- "Principal": "*",
- "Action": "s3:GetObject",
- "Resource": "arn:aws:s3:::example-previews/*"
- }
- ]
-}
-```
-
-Create an IAM user and add these permissions
-
-```
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Action": [
- "s3:GetBucketLocation",
- "s3:ListAllMyBuckets"
- ],
- "Resource": "*"
- },
- {
- "Effect": "Allow",
- "Action": [
- "s3:ListBucket"
- ],
- "Resource": [
- "arn:aws:s3:::example-previews"
- ]
- },
- {
- "Effect": "Allow",
- "Action": [
- "s3:PutObject",
- "s3:GetObject",
- "s3:DeleteObject"
- ],
- "Resource": [
- "arn:aws:s3:::example-previews/*"
- ]
- }
- ]
-}
-```
-
-### JENKINS FREESTYLE PROJECTS
-
-Create a new Freestyle Project
-
-Github Project (checked)
-Project URL: https://github.com/yourorg/website/
-
-Source Code Management
-Git (checked)
-Repositories: https://github.com/yourorg/website
-Credentials: github-example-bot (you should add a credential here, that successfully connects to github)
-Advanced:
-Refspec: +refs/pull/*:refs/remotes/origin/pr/*
-Branch Specifier: ${ghprbActualCommit}
-
-Build Triggers
-GitHub Pull Request Builder (checked)
-GitHub API Credentials: mybot
-
-#Consider whether to enable the following setting.
-#It is optional. You may also approve each PR.
-Advanced:
-Build every pull request automatically without asking.
-
-Trigger Setup:
-Build Status Message:
-`An automated preview of this PR is available at [http://$ghprbPullId.website.prtest.example.com](http://$ghprbPullId.website.prtest.example.com)`
-Update Commit Message during build:
-Commit Status Build Triggered: --none--
-Commit Status Build Started: --none--
-Commit Status Build Result: create all types of result, with message --none--
-
-Build Environment:
-Build inside a Docker container (checked)
-#Note: choose a Docker image that is appropriate for your project
-Pull docker image from repository: circleci/ruby:2.4-node-browsers-legacy
-
-Build:
-Execute Shell:
-```
-#Note: whichever build steps your site requires.
-```
-
-Post-build Actions
-Publish artifacts to S3
-S3 Profile: example-bot-profile
-
-Source: _site/** (set this value as necessary for your code)
-Destination: example-previews/example/${ghprbPullId}
-Bucket Region: us-east-1
-No upload on build failure (checked)
-
-#The following part is optional. It will post an alert into a Slack channel.
-Add Post Build Tasks
-
-Log Text: GitHub
-
-Script:
-
-```
-#!/bin/bash
-PREVIEWMESSAGE="A preview of the example website is available at http://$ghprbPullId.example.prtest.example.com"
-curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"$PREVIEWMESSAGE\"}" https://hooks.slack.com/services/T21Q22/B0141JT/aPF___
-```
-
-Check box "Run script only if all previous steps were successful"
-
-In Slack administration, (not in jenkins), create a Slack app. Create a "webhook" for your channel. That webhook goes into the curl command.
diff --git a/_posts/2020-06-31-RichardsJuneUpdate.md b/_posts/2020-06-31-RichardsJuneUpdate.md
deleted file mode 100644
index 501087e80..000000000
--- a/_posts/2020-06-31-RichardsJuneUpdate.md
+++ /dev/null
@@ -1,248 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's May/June Update
-author-id: richard
----
-
-# Boost 1.74 - Interesting Developments in Asio
-
-We're currently beta-testing Boost 1.74, the lead-up to which has seen a flurry of activity in Asio, which has
-impacted Beast.
-
-Recent versions of Asio have moved away from the idea of sequencing completion handlers directly on an `io_context`
-(which used to be called an `io_service`) towards the execution of completion handlers by an Executor.
-
-The basic idea being that the executor is a lightweight handle to some execution context, which did what the `io_context`
-always used to do - schedule the execution of completion handlers.
-
-The changes to Asio have been tracking
-[The Networking TS](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/n4771.pdf) which describes a concept
-of Executor relevant to asynchronous IO.
-
-The [Unified Executors](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p0443r11.html) proposal unifies the
-concepts of io execution and the general concept of "a place to execute work" - a somewhat more generic idea than merely
-an IO loop or thread pool. Work has been ongoing by the members of WG21 to produce an execution model that
-serves all parties' needs.
-
-Courtesy of an incredible effort by Chris Kohlhoff, the latest Asio and Boost.Asio 1.74 have been updated to accommodate both
-models of executors, with the Unified Executors model being the default. It's important to note that most users won't
-notice the change in API this time around since by default the Asio in 1.74 also includes the 1.73 interface.
-
-There are a number of preprocessor macros that can be defined to change this default behaviour:
-
-## `BOOST_ASIO_NO_TS_EXECUTORS`
-
-Defining this macro disables the Networking TS executor model. The most immediate thing you'll notice if you define this
-macro is that for some executor `e`, the expression `e.context()` becomes invalid.
-
-In the Unified Executors world, this operation is expressed as a query against the executor:
-```c++
-auto& ctx = asio::query(e, asio::execution::context);
-```
-The idea being that the execution context is a _property_ of an executor that can be _queried_ for.
-
-Another change which users are likely to notice when this macro is defined is that the `asio::executor_work_guard<>`
-template and the corresponding `asio::make_work_guard` function are no longer defined.
-
-You may well ask then, how we would prevent an underlying execution context from running out of work?
-
-In the Unified Executors world, we can think of Executors as an unbounded set of types with various properties
-enabled or disabled. The idea is that the state of the properties define the behaviour of the interaction between the
-executor and its underlying context.
-
-In the new world, we don't explicitly create a work guard which references the executor. We 'simply' create a new
-executor which happens to have the property of 'tracking work' (i.e. this executor will in some way ensure that the
-underlying context has outstanding work until the executor's lifetime ends).
-
-Again, given that `e` is some executor, here's how we spell this:
-
-```c++
-auto tracked = asio::require(e, asio::execution::outstanding_work.tracked);
-```
-
-After executing this statement, there are now two executors in play. The first, `e` may or may not be "tracking work"
-(ensuring that the underlying context does not stop), but `tracked` certainly is.
-
-There is another way to spell this, more useful in a generic programming environment.
-
-Suppose you were writing generic code and you don't know the type of the executor presented to you, or even what kind
-of execution context it is associated with. However, you do know that *if* the underlying context can stop if it runs
-out of work, then we want to prevent it from doing so for the duration of some operation.
-
-In this case, we can't use `require` because this will fail to compile if the given executor does not support the
-`outstanding_work::tracked` property. Therefore we would request (or more correctly, _prefer_) the capability rather
-than require it:
-
-```c++
-auto maybe_tracked = asio::prefer(e, asio::execution::outstanding_work.tracked);
-```
-
-We can now use `maybe_tracked` as the executor for our operation, and it will "do the right thing" regarding the tracking
-of work whatever the underlying type of execution context. It is important to note that it _is_ an executor, not merely
-a guard object that contains an executor.
-
-### post, dispatch and defer
-
-Another notable change in the Asio API when this macro is defined is that models of the Executor concept lose their
-`post`, `dispatch` and `defer` member functions.
-
-The free function versions still remain, so if you have code like this:
-```c++
-e.dispatch([]{ /* something */ });
-```
-
-you will need to rewrite it as:
-
-```c++
-asio::dispatch(e, []{ /* something */ });
-```
-
-or you can be more creative with the underlying property system:
-
-```c++
-asio::execution::execute(
- asio::prefer(
- e,
- asio::execution::blocking.possibly),
- []{ /* something */ });
-```
-
-Which is more-or-less what the implementation of `dispatch` does under the covers. It's actually a little more involved
-than that since the completion token's associated allocator has to be taken into account. There is a
-property for that too: `asio::execution::allocator`.
-
-In summary, all previous Asio and Networking TS execution/completion scenarios are now handled by executing a handler
-in some executor supporting a set of relevant properties.
-
-## BOOST_ASIO_NO_DEPRECATED
-
-Defining this macro will ensure that old asio-style invocation and allocation completion handler customisation
-functions will no longer be used. The newer paradigm is to explicitly query or require execution properties at the
-time of scheduling a completion handler for invocation. If you don't know what any of that means, you'd be in the
-majority and don't need to worry about it.
-
-## BOOST_ASIO_USE_TS_EXECUTOR_AS_DEFAULT
-
-As of Boost 1.74, Asio IO objects will be associated with the new `asio::any_io_executor` rather than the previous
-polymorphic `asio::executor`. Defining this macro, undoes this change. It may be useful to you if you have written code
-that depends on the use of `asio::executor`.
-
-## Other observations
-
-### Strands are Still a Thing
-
-Asio `strand` objects still seem to occupy a twilight zone between executors and something other than executors.
-
-To be honest, when I first saw the property mechanism, I assumed that a strand would be "just another executor" with
-some "sequential execution" property enabled. This turns out not to be the case. A strand has its own distinct execution
-context which manages the sequencing of completion handler invocations within it. The strand keeps a copy of the inner
-executor, which is the one where the strand's completion handlers will be invoked in turn.
-
-However, a strand models the Executor concept, so it also *is an* executor.
-
-### execute() looks set to become the new call().
-
-Reading the Unified Executors paper is an interesting, exciting or horrifying experience - depending on your view of
-what you'd like C++ to be.
-
-My take from the paper, fleshed out a little with the experience of touching the implementation in Asio, is that in the
-new world, the programming thought process will go something like this,
-imagine the following situation:
-
-"I need to execute this set of tasks,
-
-Ideally I'd like them to execute in parallel,
-
-I'd like to wait for them to be done"
-
-As I understand things, the idea behind unified executors is that I will be able to express these desires and mandates
-by executing my work function(s) in some executor yielded by a series of calls to `prefer` and `require`.
-
-Something like:
-
-```c++
- auto eparallel = prefer(e, bulk_guarantee.unsequenced); // prefer parallel execution
- auto eblock = require(eparallel, blocking.always); // require blocking
- execute(eblock, task1, task2, task3, task...); // blocking call which will execute in parallel if possible
-```
-
-Proponents will no doubt think,
-
-"Great! Programming by expression of intent".
-
-Detractors might say,
-
-"Ugh! Nondeterministic programs. How do I debug this when it goes wrong?"
-
-To be honest, at this stage, I find myself in both camps. No doubt time will tell.
-
-# Adventures in B2 (Boost Build)
-
-Because of the pressure of testing Beast with the new multi-faceted Asio, I wanted a way to bulk compile and test many
-different variants of:
-
-* Compilers
-* Preprocessor macro definitions
-* C++ standards
-* etc.
-
-I was dimly aware that the Boost build tool, B2, was capable of doing this from one command-line invocation.
-
-It's worth mentioning at this point that I have fairly recently discovered just how powerful B2 is. It's a shame that
-it has never been offered to the world in a neat package with some friendly conversation-style documentation, which
-seems to be the norm these days.
-
-It can actually do anything CMake can do and more. For example, all of the above.
-
-My thanks to Peter Dimov for teaching me about the existence of B2 *features* and how to use them.
-
-It turns out to be a simple 2-step process:
-
-First, define a `user-config.jam` file to describe the feature and its settings:
-
-```jam
-import feature ;
-
-feature.feature asio.mode : dflt nodep nots ts nodep-nots nodep-ts : propagated composite ;
-feature.compose nodep : "BOOST_ASIO_NO_DEPRECATED" ;
-feature.compose nots : "BOOST_ASIO_NO_TS_EXECUTORS" ;
-feature.compose ts : "BOOST_ASIO_USE_TS_EXECUTOR_AS_DEFAULT" ;
-feature.compose nodep-nots : "BOOST_ASIO_NO_DEPRECATED" "BOOST_ASIO_NO_TS_EXECUTORS" ;
-feature.compose nodep-ts : "BOOST_ASIO_NO_DEPRECATED" "BOOST_ASIO_USE_TS_EXECUTOR_AS_DEFAULT" ;
-
-using clang : : clang++ : "libc++" "-Wno-c99-extensions" ;
-using gcc : : g++ : "-Wno-c99-extensions" ;
-```
-
-Then ask b2 to do the rest:
-
-```
-./b2 --user-config=./user-config.jam \
- toolset=clang,gcc \
- asio.mode=dflt,nodep,nots,ts,nodep-nots,nodep-ts \
- variant=release \
- cxxstd=2a,17,14,11 \
- -j`grep processor /proc/cpuinfo | wc -l` \
- libs/beast/test libs/beast/example
-```
-
-This will compile all examples and run all tests in beast on a linux platform for the cross-product of:
-
-1. clang and gcc
-2. all 6 of the legal combinations of the preprocessor macros BOOST_ASIO_NO_DEPRECATED, BOOST_ASIO_NO_TS_EXECUTORS and
-BOOST_ASIO_USE_TS_EXECUTOR_AS_DEFAULT
-3. C++ standards 2a, 17, 14 and 11
-
-So that's 48 separate scenarios.
-
-It will also:
-
-* Build any dependencies.
-* Build each scenario into its own separately named path in the bin.v2 directory.
-* Understand which tests passed and failed so that passing tests are not re-run on subsequent calls to b2 unless a
- dependent file has changed.
-* Use as many CPUs as are available on the host (in my case, fortunately that's 48, otherwise this would take a long time
- to run)
-
diff --git a/_posts/2020-07-01-KrystiansMayJuneUpdate.md b/_posts/2020-07-01-KrystiansMayJuneUpdate.md
deleted file mode 100644
index b62b47617..000000000
--- a/_posts/2020-07-01-KrystiansMayJuneUpdate.md
+++ /dev/null
@@ -1,74 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: krystian
-title: Krystian's May & June Update
-author-id: krystian
----
-
-# Overview
-
-I've been very busy these last two months getting Boost.JSON ready for release, hence the combined blog post. Now that things are winding down, I hopefully can get back to the normal blog release schedule.
-
-# Boost.JSON
-
-Aside from a couple of personal projects, the vast majority of my time was spent getting Boost.JSON set for release. Breaking it down, this consisted of three main tasks: a `tag_invoke` based `value` conversion interface, parser optimizations, and support for extended JSON syntax.
-
-## Value Conversion
-
-Our previous interface that allowed users to specify their own conversions to and from `value` proved unsatisfactory, as it required too much boiler-plate when specifying conversions to and from non-class types (e.g. enumeration types). To remedy this, I was tasked with implementing an ADL solution based on `tag_invoke` which greatly reduces the amount of boiler-plate and provides a single, straightforward way to implement a custom conversion. For example, consider the following class type:
-
-```cpp
-struct customer
-{
- std::string name;
- std::size_t balance;
-};
-```
-
-To convert an object of type `customer` to `value`, all you need is to write an overload of `tag_invoke`. This can be implemented as an inline `friend` function within the class definition (thus making it visible to ADL but not unqualified lookup; see [[basic.lookup.argdep] p4.3](http://eel.is/c++draft/basic.lookup.argdep#4.3)), or as a free function:
-
-```cpp
-void tag_invoke(value_from_tag, value& jv, const customer& c)
-{
- object& obj = jv.emplace_object();
- obj["name"] = c.name;
- obj["balance"] = c.balance;
-}
-```
-
-Note that a reference to `value` is passed to the function performing the conversion. This ensures that the `storage_ptr` passed to the calling function (i.e. `value_from(T&&, storage_ptr)`) is correctly propagated to the result.
-
-Conversions from `value` to a type `T` are specified in a similar fashion:
-
-```cpp
-customer tag_invoke(value_to_tag, const value& jv)
-{
- return customer{
-        value_to(jv.at("name")),
- jv.at("balance").as_uint64()
- };
-}
-```
-
-In addition to user-provided `tag_invoke` overloads, generic conversions are provided for container-like, map-like, and string-like types, with obvious results. In general, if your container works with a range-based for loop, it will work with `value_from` and `value_to` without you having to write anything.
-
-## Parser Optimizations
-
-Optimizing the parser was a side-project turned obsession for me. While it's often a painfully tedious process of trying an idea, running benchmarks, and being disappointed with the results, the few times that you get a performance increase makes it all worth it.
-
-To preface, Boost.JSON is unique in that it can parse incrementally (no other C++ libraries implement this). However, incremental parsing is considerably slower than parsing a JSON document in its entirety, as a stack must be maintained to track which function the parser should resume to once more data is available. In addition to this, the use cases for incremental parsing will often involve bottlenecks much more significant than the speed of the parser. With this in mind, Boost.JSON's parser is optimized for non-incremental parsing of a valid JSON document. The remainder of this post will be written without consideration for incremental parsing.
-
-Most of the optimizations were branch eliminations, such as removing branches based on call site preconditions. These yield small performance gains, but once compounded we saw a performance increase of up to 7% on certain benchmarks. The biggest gain in this category came from removing a large switch statement in `parse_value` in favor of a manually written jump table. Making this function branchless significantly increases performance as it's the most called function when parsing. This also makes the function very compact, meaning it can be inlined almost everywhere.
-
-In addition to benchmark driven optimization, I also optimized based on codegen. Going into it I really had no idea what I was doing, but after staring at it for a long time and watching some videos I got the hang of it. I used this method to optimize `parse_array` and `parse_object`, aiming to get the most linear hot path possible, with the fewest number of jumps. It took a few hours, but I was able to reach my target. This was done by moving some branches around, removing the `local_const_stream` variable, and adding some optimization hints to various branches. In addition to this, the `std::size_t` parameter (representing the number of elements) was removed from the `on_array_end` and `on_object_end` handlers as it didn't provide any useful information and is not used by `parser`. This yielded a performance increase of up to 4% in certain cases.
-
-The last major optimization was [suggested](https://github.com/CPPAlliance/json/issues/115) by [Joaquín M López Muñoz](https://github.com/joaquintides). In essence, integer division is a slow operation, so compilers have all sorts of ways to avoid it; one of which is doing multiplication instead. When dividing by a constant divisor, the compiler is able to convert this to multiplication by the reciprocal of the divisor, which can be up to 20 times faster. Where this is applicable in Boost.JSON is in the calculation used to get the index of the bucket for a `object` key. The implementation was pretty straightforward, and it yielded up to a 10% increase in performance for `object` heavy benchmarks -- a remarkable gain from such a small change. Thank you Joaquín :)
-
-## Parser Extensions
-
-The last major thing I worked on for Boost.JSON was implementing support for extended JSON syntaxes. The two supported extensions are: allowing C and C++ style comments to appear within whitespace, and
-- allowing trailing commas to appear after the last element of an array or object.
-This post isn't quite in chronological order, but comment support was my introduction into working on the parser (a trial by fire). After a few naive attempts at implementation, the result was comment parsing that did not affect performance at all when not enabled (as it should) and only has a minor impact on performance when enabled. This was done by building off existing branches within `parse_array` and `parse_object` instead of checking for comments every time whitespace is being parsed. Allowing for trailing commas was done in much the same way. The larger takeaway from implementing these extensions was getting to know the internals of the parser much better, allowing me to implement the aforementioned optimizations, as well as more complex extensions in the future.
-
-If you want to get in touch with me, you can message me on the [Cpplang slack](http://slack.cpp.al/), or [shoot me an email](mailto:sdkrystian@gmail.com).
\ No newline at end of file
diff --git a/_posts/2020-08-01-KrystiansJulyUpdate.md b/_posts/2020-08-01-KrystiansJulyUpdate.md
deleted file mode 100644
index 16a04ff54..000000000
--- a/_posts/2020-08-01-KrystiansJulyUpdate.md
+++ /dev/null
@@ -1,81 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: krystian
-title: Krystian's July Update
-author-id: krystian
----
-
-# What I've been doing
-
-I've been spending a *lot* of time working on optimizing the parser; perhaps a bit too much. Nevertheless, it's very enjoyable and in doing so I've learned more than I could hope to ever learn in school. In addition to the optimization, comment and trailing comma support finally got merged, and I implemented UTF-8 validation (enabled by default, but it can be disabled).
-
-## UTF-8 validation
-
-Prior to implementing this extension (or rather, feature which can be disabled), the parser considers any character appearing within a string to be valid, so long as it wasn't a control character or formed an illegal escape. While this is _fast_, it technically does not conform to the JSON standard.
-
-As per Section 2 of the [JSON Data Interchange Syntax Standard](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf):
-
-> A conforming JSON text is a sequence of Unicode code points that strictly conforms to the JSON grammar defined by this specification.
-
-As with most standardese, this particular requirement for conformance is not outright stated, but rather implied. Anyways, that's enough standardese talk for this post.
-
-After working on this parser so much, I've pretty much got the suspend/resume idiom we use nailed down, so integrating it with the string parsing function was trivial... the actual validation, not so much. I hadn't the slightest clue about any of the terminology used in the Unicode standard, so it took a good couple of hours to point myself in the right direction. Anyways, a lot of Googling and a messy python script for generating valid and invalid byte sequences later, I had something functional.
-
-Then came my favorite part: optimization.
-
-The first byte within a UTF-8 byte sequence determines how many bytes will follow, as well as the valid ranges for these following bytes. Since this byte has such a large valid range, I settled on using a lookup table to check whether the first byte is valid.
-
-Luckily, the following bytes have ranges that can be trivially checked using a mask. For example, if the first byte is `0xE1`, then the byte sequence will be composed of three bytes, the latter two having a valid range of `0x80` to `0xBF`. Thus, our fast-path routine to verify this sequence can be written as:
-
-```cpp
-uint32_t v;
-// this is reversed on big-endian
-std::memcpy(&v, bytes, 4); // 4 bytes load
-
-switch (lookup_table[v & 0x7F]) // mask out the most significant bit
-{
-...
-case 3:
- if ((v & 0x00C0C000) == 0x00808000)
- return result::ok;
- return result::fail;
-...
-}
-```
-
-This works well for all but one byte sequence combination. For whatever reason, UTF-8 byte sequences that start with `0xF0` can have a second byte between `0x90` and `0xBF` which requires the check to be done as:
-
-```cpp
-(v & 0xC0C0FF00) + 0x7F7F7000 <= 0x00002F00
-```
-
-It's a weird little outlier that I spent way too much time trying to figure out.
-
-Since our parser supports incremental parsing, we only take the fast path if the input stream has four or more bytes remaining. If this condition isn't met, we have to check each byte individually. It's slower, but shouldn't happen often.
-
-## Other optimizations
-
-I've been trying out a number of different optimizations to squeeze all the performance we can get out of the parser. Most recently, I rewrote the parser functions to take a `const char*` parameter indicating the start of the value, and return a pointer to the end of the value (if parsing succeeds) or `nullptr` upon failure or partial parsing.
-
-Since I'm not great at explaining things, here's the before:
-
-```cpp
-result parse_array(const_stream&);
-```
-
-and here's the after:
-
-```cpp
-const char* parse_array(const char*);
-```
-
-This allows us to keep the pointer to the current position in the stream entirely within the registers when parsing a document. Since the value is local to the function, the compiler no longer needs to write it to the `const_stream` object at the top of the call stack (created within `basic_parser::write_some`), nor read it each time a nested value is parsed. This yields an *8%* boost in performance across the board.
-
-More time was spent optimizing the SSE2 functions used for parsing unescaped strings and whitespace as well. Within `count_whitespace`, we were able to get rid of a `_mm_cmpeq_epi8` (`PCMPEQB`) instruction by performing a bitwise or with 4 after testing for spaces, and then comparing the result with `'\r'`, as the ASCII value of tab (`'\t'`) only differs from that of the carriage return by the third least significant bit. This was something that clang was doing for us, but it's nice to implement it for all other compilers.
-
-For `count_unescaped` (used to parse unescaped strings), we were able to again reduce the length of the hot path, this time a bit more significantly. Instead of checking for control characters by means of relational comparison, we can instead check for quotes and backslash first, and once that's done, the `_mm_min_epu8` (`PMINUB`) instruction can be used to set all control characters (0 - 31) to 31, and then test for equality. This brought our performance on the `strings.json` benchmark past the 8 GB/s mark from around 7.7 GB/s. Combined with the optimization of how the stream pointer is passed around, we now hit just a hair under 8.5 GB/s on this benchmark.
-
-## The important but boring stuff
-
-After merging the parser extensions, there was a bunch of housekeeping to do such as improving coverage and writing documentation. Though these are far from being my favorite tasks, they are integral to writing a good library, so it must be done. My initial approach to writing tests for the parser extensions was to run each test on every parser configuration we have, but this soon proved to be a nonoptimal approach when the time taken to run the test suite quadrupled. I ended up doing the right thing by making the tests more surgical in nature, and in doing so we even got 100% coverage on the parser.
diff --git a/_posts/2020-08-01-RichardsJulyUpdate.md b/_posts/2020-08-01-RichardsJulyUpdate.md
deleted file mode 100644
index d8bfa955b..000000000
--- a/_posts/2020-08-01-RichardsJulyUpdate.md
+++ /dev/null
@@ -1,116 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's July Update
-author-id: richard
----
-
-# Boost 1.74 - Progress Update
-
-Boost 1.74 beta release has been published and the various maintainers are applying last-minute bug fixes to their
-libraries in readiness for the final release on 12th August.
-
-For us in the Beast team, a fair amount of attention has been spent monitoring last-minute changes to Asio, as Chris
-makes the final tweaks after the Unified Executors update I mentioned in last month's blog.
-
-## Comprehensive Testing
-
-Last month I [committed](https://github.com/boostorg/beast/commit/b84d8ad3d48d173bd78ed6dc2ed8d26d84762af3) what I hoped
-would be the first of a suite of Dockerfiles which help the mass testing of Beast. The upstream changes to Asio
-were a lesson in just how many compilers, hosts and target environments we have to support in order that our user base
-is not surprised or impeded as a result of compiler selection or imposition.
-
-I am no expert in Docker matters. I mean, I can read the manual and follow basic instructions like anyone else,
-but I was hoping that someone would come along to help flesh out the suite a little. Particularly for the Windows
-builds, since I have no experience in installing software from the command line in Windows, and the greatest respect
-for those individuals who have mastered the art.
-
-Fortunately for me, we've had a new addition to the team. Sam Darwin, who has submitted a number of commits which
-increase Docker coverage. Of these I was most pleased to see the submission of the
-[Windows](https://github.com/boostorg/beast/commit/3486e9cb18aa39b392e07031a33e65b1792fbccf) build matrix which has
-been of enormous value. I think it would be fair to say that Microsoft Visual Studio is nothing short of notorious
-for its subtle deviations from the standard. As if it were not difficult enough already to create useful and coherent
-template libraries, supporting (particularly older) versions of MSVC requires extra care and workarounds.
-
-Hopefully, now that two-phase lookup has been firmly
-[adopted](https://devblogs.microsoft.com/cppblog/two-phase-name-lookup-support-comes-to-msvc/) by Microsoft (some two
-decades after its standardisation), this kind of issue will become less of a concern as time moves forward and support
-for older compilers is gradually dropped.
-
-To be fair to Microsoft, if my memory serves, they were pioneers of bringing the C++ language to the masses back in the
-days of Visual Studio 97 and prior to that, the separate product Visual C++ (which we used to have to pay for!).
-
-In hindsight a number of errors were made in terms of implementation that had lasting effects on a generation of
-developers and their projects. But arguably, had Microsoft not championed this effort, it is likely that C++ may not
-have achieved the penetration and exposure that it did.
-
-# A Bug In Asio Resolver? Surely Not?
-
-One of the examples in the Beast repository is a simple
-[web crawler](https://github.com/boostorg/beast/tree/develop/example/http/client/crawl). If you have taken sufficient
-interest to read the code, you will have noticed that it follows the model of "multiple threads, one `io_context` per
-thread."
-
-This may seem an odd decision, since a web crawler spends most of its time idle waiting for asynchronous IO to complete.
-However, there is an unfortunate implementation detail in the *nix version of `asio::ip::tcp::resolver` which is due to
-a limitation of the [`getaddrinfo`](https://linux.die.net/man/3/getaddrinfo) API upon which it depends.
-
-For background, `getaddrinfo` is a thread-safe, blocking call. This means that Asio has to spawn a background thread
-in order to perform the actual name resolution as part of the implementation of `async_resolve`.
-
-So with that out of the way, why am I writing this?
-
-One of the bugs I tackled this month was that this demo, when run with multiple (say 500) threads, can be reliably made
-to hang indefinitely on my Fedora 32 system. At first, I assumed that either we or Asio had introduced a race condition.
-However, after digging into the lockup it turned out to almost always lock up while resolving the FQDN `secureupload.eu`.
-
-Investigating further, it turns out that the nameserver response for this FQDN is too long to fit into a UDP packet.
-This means that the DNS client on the Linux host is forced to revert to a TCP connection in order to receive the entire
-record. This can be evidenced by using `nslookup` on the command line:
-
-```
-$ nslookup secureupload.eu
-Server: 192.168.0.1 <<-- address of my local nameserver
-Address: 192.168.0.1#53
-
-Non-authoritative answer:
-Name: secureupload.eu
-Address: 45.87.161.67
-
-... many others ...
-
-Name: secureupload.eu
-Address: 45.76.235.58
-;; Truncated, retrying in TCP mode. <<-- indication that nslookup is switching to TCP
-Name: secureupload.eu
-Address: 2a04:5b82:3:209::2
-
-... many others
-
-Name: secureupload.eu
-Address: 2a04:5b82:3:203::2
-```
-
-Furthermore, whenever I checked the call stack, the thread in question was always stuck in the glibc function
-[`send_vc()`](https://code.woboq.org/userspace/glibc/resolv/res_send.c.html#send_vc), which is called by `getaddrinfo`
-in response to the condition of a truncated UDP response.
-
-So despite my initial assumption that there must be a race in user code, the evidence was starting to point to something
-interesting about this particular FQDN. Now I've been writing software for over three decades on and off and I've seen
-a lot of bugs in code that the authors were adamant that they had not put there. We are, as a rule, our own worst
-critics. So I was reluctant to believe that there could be a data-driven bug in glibc.
-
-Nevertheless, a scan of the redhat bug tracker by Chris turned up
-[this little nugget](https://bugzilla.redhat.com/show_bug.cgi?id=1429442).
-
-It turns out that what was happening was that the TCP connection to the upstream name server had gone quiet or had
-dropped packets - presumably courtesy of my cheap Huawei Home Gateway router which was being swamped by 500 simultaneous
-requests by the 500 threads I had assigned to the crawler. Because the glibc implementation does not implement
-a timeout on the request, the failed reception of a response by the nameserver caused the call to `getaddrinfo` to hang
-whenever this FQDN was being resolved.
-
-So it turns out that there is indeed a bug in glibc, which can affect any *nix (or cygwin) program that performs DNS
-address resolution when the requested domain's response is too long to fit in a UDP response message.
-
-Until this bug is fixed, I have learned a lesson and you have been warned.
diff --git a/_posts/2020-09-01-RichardsAugustUpdate.md b/_posts/2020-09-01-RichardsAugustUpdate.md
deleted file mode 100644
index 63be03e91..000000000
--- a/_posts/2020-09-01-RichardsAugustUpdate.md
+++ /dev/null
@@ -1,250 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's August Update
-author-id: richard
----
-
-# New Debugging Feature in Asio and Beast
-
-As covered previously, Boost 1.74 brought an implementation of the new unified executors model to Boost.Asio.
-
-Support for this is not the only thing that is new in Beast.
-
-Chris Kohlhoff recently submitted a [PR](https://github.com/boostorg/beast/pull/2053) to Beast's repository
-demonstrating how to annotate source code with the `BOOST_ASIO_HANDLER_LOCATION` macro. I have since followed up and
-annotated all asynchronous operations in Beast this way.
-
-In a normal build, there is no effect (and zero extra code generation). However, defining the preprocessor macro
-`BOOST_ASIO_ENABLE_HANDLER_TRACKING` will cause these macros to generate code which will emit handler tracking
-log data to stdout in a very specific format.
-
-The output is designed to describe the flow of asynchronous events in a format suitable for generating a visualisation
-in linear terms. i.e. the asynchronous events are flattened and linked to show causality.
-
-Here is an example of the output:
-
-```
-@asio|1597543084.233257|>33|
-@asio|1597543084.233273|33|deadline_timer@0x7fa6cac25218.cancel
-@asio|1597543084.233681|33^34|in 'basic_stream::async_write_some' (../../../../../../boost/beast/core/impl/basic_stream.hpp:321)
-@asio|1597543084.233681|33^34|called from 'async_write' (../../../../../../boost/asio/impl/write.hpp:331)
-@asio|1597543084.233681|33^34|called from 'ssl::stream<>::async_write_some' (../../../../../../boost/asio/ssl/detail/io.hpp:201)
-@asio|1597543084.233681|33^34|called from 'http::async_write_some' (../../../../../../boost/beast/http/impl/write.hpp:64)
-@asio|1597543084.233681|33^34|called from 'http::async_write' (../../../../../../boost/beast/http/impl/write.hpp:223)
-@asio|1597543084.233681|33^34|called from 'http::async_write(msg)' (../../../../../../boost/beast/http/impl/write.hpp:277)
-@asio|1597543084.233681|33*34|deadline_timer@0x7fa6cac25298.async_wait
-@asio|1597543084.233801|33^35|in 'basic_stream::async_write_some' (../../../../../../boost/beast/core/impl/basic_stream.hpp:373)
-@asio|1597543084.233801|33^35|called from 'async_write' (../../../../../../boost/asio/impl/write.hpp:331)
-@asio|1597543084.233801|33^35|called from 'ssl::stream<>::async_write_some' (../../../../../../boost/asio/ssl/detail/io.hpp:201)
-@asio|1597543084.233801|33^35|called from 'http::async_write_some' (../../../../../../boost/beast/http/impl/write.hpp:64)
-@asio|1597543084.233801|33^35|called from 'http::async_write' (../../../../../../boost/beast/http/impl/write.hpp:223)
-@asio|1597543084.233801|33^35|called from 'http::async_write(msg)' (../../../../../../boost/beast/http/impl/write.hpp:277)
-@asio|1597543084.233801|33*35|socket@0x7fa6cac251c8.async_send
-@asio|1597543084.233910|.35|non_blocking_send,ec=system:0,bytes_transferred=103
-@asio|1597543084.233949|<33|
-@asio|1597543084.233983|<31|
-@asio|1597543084.234031|>30|ec=system:89
-@asio|1597543084.234045|30*36|strand_executor@0x7fa6cac24bd0.execute
-@asio|1597543084.234054|>36|
-@asio|1597543084.234064|<36|
-@asio|1597543084.234072|<30|
-@asio|1597543084.234086|>35|ec=system:0,bytes_transferred=103
-@asio|1597543084.234100|35*37|strand_executor@0x7fa6cac24bd0.execute
-@asio|1597543084.234109|>37|
-@asio|1597543084.234119|37|deadline_timer@0x7fa6cac25298.cancel
-@asio|1597543084.234198|37^38|in 'basic_stream::async_read_some' (../../../../../../boost/beast/core/impl/basic_stream.hpp:321)
-@asio|1597543084.234198|37^38|called from 'ssl::stream<>::async_read_some' (../../../../../../boost/asio/ssl/detail/io.hpp:168)
-@asio|1597543084.234198|37^38|called from 'http::async_read_some' (../../../../../../boost/beast/http/impl/read.hpp:212)
-@asio|1597543084.234198|37^38|called from 'http::async_read' (../../../../../../boost/beast/http/impl/read.hpp:297)
-@asio|1597543084.234198|37^38|called from 'http::async_read(msg)' (../../../../../../boost/beast/http/impl/read.hpp:101)
-@asio|1597543084.234198|37*38|deadline_timer@0x7fa6cac25218.async_wait
-@asio|1597543084.234288|37^39|in 'basic_stream::async_read_some' (../../../../../../boost/beast/core/impl/basic_stream.hpp:373)
-@asio|1597543084.234288|37^39|called from 'ssl::stream<>::async_read_some' (../../../../../../boost/asio/ssl/detail/io.hpp:168)
-@asio|1597543084.234288|37^39|called from 'http::async_read_some' (../../../../../../boost/beast/http/impl/read.hpp:212)
-@asio|1597543084.234288|37^39|called from 'http::async_read' (../../../../../../boost/beast/http/impl/read.hpp:297)
-@asio|1597543084.234288|37^39|called from 'http::async_read(msg)' (../../../../../../boost/beast/http/impl/read.hpp:101)
-@asio|1597543084.234288|37*39|socket@0x7fa6cac251c8.async_receive
-@asio|1597543084.234334|.39|non_blocking_recv,ec=system:35,bytes_transferred=0
-@asio|1597543084.234353|<37|
-@asio|1597543084.234364|<35|
-@asio|1597543084.234380|>34|ec=system:89
-@asio|1597543084.234392|34*40|strand_executor@0x7fa6cac24bd0.execute
-@asio|1597543084.234401|>40|
-@asio|1597543084.234408|<40|
-@asio|1597543084.234416|<34|
-@asio|1597543084.427594|.39|non_blocking_recv,ec=system:0,bytes_transferred=534
-@asio|1597543084.427680|>39|ec=system:0,bytes_transferred=534
-```
-
-So far, so good. But not very informative or friendly to the native eye.
-
-Fortunately as of Boost 1.74 there is a tool in the Asio source tree to convert this data into something consumable by the open source
-tool dot, which can then output the resulting execution graph in one of a number of common graphical formats such as
-PNG, BMP, SVG and many others.
-
-Here is an example of a visualisation of a simple execution graph:
-
-![](/images/posts/richard/2020-09-01-handler-tracking-example.png)
-
-The tool you need to do this is in the `asio` subproject of the Boost repo. The full path is
-`libs/asio/tools/handlerviz.pl`. The command is self-documenting but for clarity, the process would be like this:
-* Compile and link your program with the compiler flag `-DBOOST_ASIO_ENABLE_HANDLER_TRACKING`
-* run your program, capturing stdout to a file (say `mylog.txt`) (or you can pipe it to the next step)
-* `handlerviz.pl < mylog.txt | dot -Tpng -o mygraph.png`
-* You should now be able to view your graph in a web browser, editor or picture viewer.
-
-The documentation for dot is [here](https://linux.die.net/man/1/dot). dot is usually available in the graphviz package
-of your linux distro/brew cask. Windows users can download an executable suite
-[here](https://www.graphviz.org/download/).
-
-If you have written your own asynchronous operations to complement Beast or Asio, or indeed you just wish to add your
-handler locations to the graph output, you can do so by inserting the `BOOST_ASIO_HANDLER_LOCATION` macro just before
-each asynchronous suspension point (i.e. just before the call to `async_xxx`). If you're doing this in an Asio
-`coroutine` (not to be confused with C++ coroutines) then be sure to place the macro in curly braces after the
-YIELD macro, for example:
-
-```
- ...
-
- // this marks a suspension point of the coroutine
- BOOST_ASIO_CORO_YIELD
- {
- // This macro creates scoped variables so must be in a private scope
- BOOST_ASIO_HANDLER_LOCATION(( // note: double open brackets
- __FILE__, __LINE__, // source location
- "websocket::tcp::async_teardown" // name of the initiating function
- ));
-
- // this is the initiation of the next inner asynchronous operation
- s_.async_wait(
- net::socket_base::wait_read,
- beast::detail::bind_continuation(std::move(*this)));
-
- // there is an implied return statement here
- }
-
- ...
-```
-
-When writing applications, people historically have used Continuation Passing Style when calling asynchronous
-operations, capturing a shared_ptr to the connection implementation in each handler (continuation).
-
-When using this macro in user code written in continuation passing style, you might do so like this:
-
-```
-void send_request(http::request<http::string_body> req)
-{
- send_queue_.push_back(std::move(req));
- if (!sending_)
- {
- sending_ = true;
- maybe_initiate_send();
- }
-}
-
-void my_connection_impl::maybe_initiate_send()
-{
- if (send_queue_.empty())
- {
- sending_ = false;
- return;
- }
-
- // assume request_queue_ is a std::deque so elements will have stable addresses
- auto& current_request = request_queue_.front();
-
- BOOST_ASIO_HANDLER_LOCATION((
- __FILE__, __LINE__,
- "my_connection_impl::maybe_initiate_send"
- ));
-
- // suspension point
-
- boost::beast::http::async_write(stream_, current_request_,
- [self = this->shared_from_this()](boost::beast::error_code ec, std::size_t)
- {
- // continuation
-
- if (!ec)
- {
- self->request_queue_.pop_front();
- self->maybe_initiate_send();
- }
- else
- {
- // handle error
- }
- });
-}
-```
-
-
-If you're using c++ coroutines it becomes a little more complicated as you want the lifetime of the tracking
-state to be destroyed after the asynchronous initiation function but before the coroutine continuation:
-
-```
-namespace net = boost::asio;
-namespace http = boost::beast::http;
-
-auto connect_and_send(
- boost::asio::ip::tcp::socket& stream,
- std::string host,
- std::string port,
- http::request<http::string_body> req)
--> net::awaitable<void>
-{
- namespace net = boost::asio;
-
- auto resolver = net::ip::tcp::resolver(co_await net::this_coro::executor);
-
- // suspension point coming up
-
- auto oresults = std::optional<net::awaitable<net::ip::tcp::resolver::results_type>>();
- {
- BOOST_ASIO_HANDLER_LOCATION((
- __FILE__, __LINE__,
- "my_connection_impl::connect_and_send"
- ));
- oresults.emplace(resolver.async_resolve(host, port, net::use_awaitable));
- }
- auto results = co_await std::move(*oresults);
-
- auto oconnect = std::optional<net::awaitable<net::ip::tcp::endpoint>>();
- {
- BOOST_ASIO_HANDLER_LOCATION((
- __FILE__, __LINE__,
- "my_connection_impl::connect_and_send"
- ));
- oconnect.emplace(net::async_connect(stream, results, net::use_awaitable));
- }
- auto ep = co_await *std::move(oconnect);
-
- // ... and so on ...
-
-}
-```
-
-Which might look a little unwieldy compared to the unannotated code, which could look like this:
-
-```
-auto connect_and_send(
- boost::asio::ip::tcp::socket& stream,
- std::string host,
- std::string port,
- http::request<http::string_body> req)
--> net::awaitable<void>
-{
- namespace net = boost::asio;
-
- auto resolver = net::ip::tcp::resolver(co_await net::this_coro::executor);
-
- auto ep = co_await net::async_connect(stream,
- co_await resolver.async_resolve(host, port, net::use_awaitable),
- net::use_awaitable);
-
- // ... and so on ...
-
-}
-```
diff --git a/_posts/2020-09-06-KrystiansAugustUpdate.md b/_posts/2020-09-06-KrystiansAugustUpdate.md
deleted file mode 100644
index 4fd4ea7f3..000000000
--- a/_posts/2020-09-06-KrystiansAugustUpdate.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: krystian
-title: Krystian's August Update
-author-id: krystian
----
-
-# Boost.JSON
-
-Boost.JSON is officially scheduled for review! It starts on September 14th, so there isn't much time left to finish up polishing the library -- but it looks like we will make the deadline.
-
-## Optimize, optimize, optimize
-
-Boost.JSON's performance has significantly increased in the past month. The change to the parsing functions where we pass and return `const char*` instead of `result` (detailed in my last post) was merged, bringing large gains across the board. After this, my work on optimizing `basic_parser` was complete (for now...), save for a few more minor changes:
-
-- The handler is stored as the first data member as opposed to passing a reference to each parse function. This means that the `this` pointer for `basic_parser` is the `this` pointer for the handler, which eliminates some register spills.
-
-- The parser's depth (i.e. nesting level of objects/arrays) is now tracked as `max_depth - actual_depth`, meaning that we don't have to read `max_depth` from memory each time a structure is parsed.
-
-- `parse_string` was split into two functions: `parse_unescaped` and `parse_escaped`. The former is much cheaper to call as it doesn't have to store the string within a local buffer, and since unescaped strings are vastly more common in JSON documents, this increases performance considerably.
-
-### The DOM parser
-
-Our old implementation of `parser` was pretty wasteful. It stored state information (such as whether we were parsing an object or array), keys, and values, all on one stack. This proved to be quite a pain when it came to unwinding it and also required us to align the stack when pushing arrays and objects.
-
-Several months ago, Vinnie and I tried to figure out how to make the stack homogeneous but came to a dead end. I decided to revisit the idea, and after some experimentation, it became apparent that there was a *lot* of redundancy in the implementation. For example, `basic_parser` already keeps track of the current object/array/string/key size, so there is no reason to do so within `parser`. The state information we were tracking was also not needed -- `basic_parser` already checks the syntactic correctness of the input. That left one more thing: strings and keys.
-
-My rudimentary implementation required two stacks: one for keys and strings, and the other for values. Other information, such as the sizes of objects and arrays, were obtained from `basic_parser`. My implementation, though primitive, gave some promising results on the benchmarks: up to 10% for certain documents. After some brainstorming with Vinnie, he had the idea of storing object keys as values; the last piece of the puzzle we needed to make this thing work.
-
-His fleshed-out implementation was even faster. In just a week's time, Boost.JSON's performance increased by some 15%. I'm still working on the finishing touches, but the results are looking promising.
-
-## More UTF-8 validation malarkey
-
-Out of all the things I've worked on, nothing has proved as frustrating as UTF-8 validation. The validation itself is trivial; but making it work with an incremental parser is remarkably difficult. Shortly after merging the feature, [an issue was opened](https://github.com/CPPAlliance/json/issues/162); while validation worked just fine when a document was parsed without suspending, I neglected to write tests for incremental parsing, and that's precisely where the bug was. Turns out, if parsing suspended while validating a UTF-8 byte sequence, the handler just would not be called.
-
-This was... quite a problem to say the least, and required me to reimplement UTF-8 validation from scratch -- but with a twist. We don't want to pass partial UTF-8 sequences because it just transfers the burden of assembling incomplete sequences to the handler. This means that we need to store the sequences, append to them until we get a complete codepoint, and only then can we validate and send it off to the handler. Doing this in an efficient manner proved to be quite challenging, so I ended up with a "fix" that was 50% code and 50% `// KRYSTIAN TODO: this can be optimized`. The tests provided in the issue finally passed, so the patch was merged.
-
-I thought my woes with validation were over, but I was wrong. Just over a week later, a new issue rolled in:
-
-[Handler not invoked correctly in multi-byte UTF8 sequences, part 2](https://github.com/CPPAlliance/json/issues/162)
-
-Luckily, fixing this didn't require another rewrite. This taught me a fine lesson in exhaustive testing.
diff --git a/_posts/2020-09-29-KrystiansSeptemberUpdate.md b/_posts/2020-09-29-KrystiansSeptemberUpdate.md
deleted file mode 100644
index f7acc6475..000000000
--- a/_posts/2020-09-29-KrystiansSeptemberUpdate.md
+++ /dev/null
@@ -1,99 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: krystian
-title: Krystian's September Update
-author-id: krystian
----
-
-# Reviewing the review
-
-The review period for Boost.JSON has come and gone, and we got some great feedback on the design of the library. Glancing over the results, it appears that the general mood was to accept the library. This doesn't mean that there weren't any problem areas -- most notably the documentation, which often did contain the information people wanted, but it was difficult to find.
-
-Other points of contention were the use of a push parser as opposed to a pull parser, the use of `double`, `uint64_t`, and `int64_t` without allowing for users to change them, and the value conversion interface. Overall some very good points were made, and I'd like to thank everyone for participating in the review.
-
-# Customizing the build
-
-I put a bit of work into improving our CI matrix, as it had several redundant configurations and did not test newer compiler versions (e.g. GCC 10, clang 11), nor did we have any 32-bit jobs. The most difficult thing about working on the build matrix is balancing how exhaustive it is with the turnaround time -- sure, we could add 60 configurations that test x86, x86-64, and ARM on every major compiler version released since 2011, but the turnaround would be abysmal.
-
-To alleviate this, I only added 32-bit jobs for the sanitizers that use a recent version of GCC. It's a less common configuration in the days of 64-bit universality, and if 64 bit works then it's highly likely that 32 bit will "just work" as well.
-
-
-Here's a table of the new Travis configurations that will be added:
-
-| Compiler | Library | C++ Standard | Variant | OS | Architecture | Job |
-|:-------------:|:---------:|:------------:|:----------:|:--------------:|:------------:|:-----------------:|
-| --- | --- | --- | Boost | Linux (Xenial) | x86-64 | Documentation |
-| gcc 8.4.0 | libstdc++ | 11 | Boost | Linux (Xenial) | x86-64 | Coverage |
-| clang 6.0.1 | libstdc++ | 11, 14 | Boost | Linux (Xenial) | x86-64 | Valgrind |
-| clang 11.0.0 | libstdc++ | 17 | Boost | Linux (Xenial) | x86-64 | Address Sanitizer |
-| clang 11.0.0 | libstdc++ | 17 | Boost | Linux (Xenial) | x86-64 | UB Sanitizer |
-| msvc 14.1 | MS STL | 11, 14, 17 | Boost | Windows | x86-64 | --- |
-| msvc 14.1 | MS STL | 17, 2a | Standalone | Windows | x86-64 | --- |
-| msvc 14.2 | MS STL | 17, 2a | Boost | Windows | x86-64 | --- |
-| msvc 14.2 | MS STL | 17, 2a | Standalone | Windows | x86-64 | --- |
-| icc 2021.1 | libstdc++ | 11, 14, 17 | Boost | Linux (Bionic) | x86-64 | --- |
-| gcc 4.8.5 | libstdc++ | 11 | Boost | Linux (Trusty) | x86-64 | --- |
-| gcc 4.9.4 | libstdc++ | 11 | Boost | Linux (Trusty) | x86-64 | --- |
-| gcc 5.5.0 | libstdc++ | 11 | Boost | Linux (Xenial) | x86-64 | --- |
-| gcc 6.5.0 | libstdc++ | 11, 14 | Boost | Linux (Xenial) | x86-64 | --- |
-| gcc 7.5.0 | libstdc++ | 14, 17 | Boost | Linux (Xenial) | x86-64 | --- |
-| gcc 8.4.0 | libstdc++ | 17, 2a | Boost | Linux (Xenial) | x86-64 | --- |
-| gcc 9.3.0 | libstdc++ | 17, 2a | Boost | Linux (Xenial) | x86-64 | --- |
-| gcc 9.3.0 | libstdc++ | 17, 2a | Standalone | Linux (Xenial) | x86-64 | --- |
-| gcc 10.2.0 | libstdc++ | 17, 2a | Boost | Linux (Focal) | x86-64 | --- |
-| gcc 10.2.0 | libstdc++ | 17, 2a | Standalone | Linux (Focal) | x86-64 | --- |
-| gcc (trunk) | libstdc++ | 17, 2a | Boost | Linux (Focal) | x86-64 | --- |
-| gcc (trunk) | libstdc++ | 17, 2a | Standalone | Linux (Focal) | x86-64 | --- |
-| clang 3.8.0 | libstdc++ | 11 | Boost | Linux (Trusty) | x86-64 | --- |
-| clang 4.0.0 | libstdc++ | 11, 14 | Boost | Linux (Xenial) | x86-64 | --- |
-| clang 5.0.2 | libstdc++ | 11, 14 | Boost | Linux (Xenial) | x86-64 | --- |
-| clang 6.0.1 | libstdc++ | 14, 17 | Boost | Linux (Xenial) | x86-64 | --- |
-| clang 7.0.1 | libstdc++ | 17, 2a | Boost | Linux (Xenial) | x86-64 | --- |
-| clang 9.0.1 | libstdc++ | 17, 2a | Boost | Linux (Xenial) | x86-64 | --- |
-| clang 9.0.1 | libstdc++ | 17, 2a | Standalone | Linux (Xenial) | x86-64 | --- |
-| clang 10.0.1 | libstdc++ | 17, 2a | Boost | Linux (Xenial) | x86-64 | --- |
-| clang 10.0.1 | libstdc++ | 17, 2a | Standalone | Linux (Xenial) | x86-64 | --- |
-| clang 11.0.0 | libstdc++ | 17, 2a | Boost | Linux (Xenial) | x86-64 | --- |
-| clang 11.0.0 | libstdc++ | 17, 2a | Standalone | Linux (Xenial) | x86-64 | --- |
-| clang (trunk) | libstdc++ | 17, 2a | Boost | Linux (Xenial) | x86-64 | --- |
-| clang (trunk) | libstdc++ | 17, 2a | Standalone | Linux (Xenial) | x86-64 | --- |
-
-I think it strikes a good balance between exhaustiveness and turnaround time, and we now test the most recent compiler versions to make sure they won't cause problems on the cutting edge.
-
-# Binary size
-
-It doesn't matter how good a library is if it's too big to use within your environment. As with all things in computer science, there is a trade-off between size and speed; seldom can you have both. We have been exploring options to reduce the size of the binary, and this mostly involved removing a lot of the pre-written tables we have (such as the ever-controversial jump table), since it allows the compiler to take into account the specific options it was passed and optimize for those constraints (i.e. size and speed) rather than hard-coding in a set configuration as we did with the jump tables.
-
-Peter Dimov also helped out by transitioning our compile-time system of generating unique parse functions for each permutation of extensions to a runtime system, which drastically decreases the binary size without affecting performance too much.
-
-I must admit I'm not the biggest fan of these changes, but it's important to support the use of Boost.JSON in embedded environments. As Peter has said time and time again: don't overfit for a particular use-case or configuration.
-
-Another place with room for improvement is with string to floating-point conversions. Right now we calculate a mantissa and base-10 exponent, then lookup the value in a massive table that contains pre-calculated powers of 10 from 1e-308 to 1e+308. As you can surmise, this takes up a substantial amount of space (8 bytes * 618 elements = 4.95 kb).
-
-Here is a boiled down version of how we currently perform the conversion:
-
-```cpp
-double calculate_float(
- std::uint64_t mantissa,
- std::uint32_t exponent,
- bool sign)
-{
- constexpr static double table[618] =
- {
- 1e-308, 1e-307,
- ...,
- 1e307, 1e308
- };
- double power;
- if(exponent < -308 || exponent > 308)
- power = std::pow(10.0, exponent);
- else
- power = table[exponent + 308]
- double result = mantissa * power;
- return sign ? -result : result;
-}
-```
-
-To further reduce the size of the binary, Peter suggested that we instead calculate `power` as `10^(8 * floor(exponent / 8)) * 10^(exponent mod 8)`. Yes, the division operations there might look expensive, but any decent optimizing compiler will transform `exponent / 8` to `exponent >> 3`, and `exponent mod 8` to `exponent & 7`. This does introduce another multiplication instruction, but at the same time, it makes our table 8 times smaller. In theory, the slight drop in performance is worth the significant reduction in binary size.
-
\ No newline at end of file
diff --git a/_posts/2020-09-30-RichardsSeptemberUpdate.md b/_posts/2020-09-30-RichardsSeptemberUpdate.md
deleted file mode 100644
index d6a64cde6..000000000
--- a/_posts/2020-09-30-RichardsSeptemberUpdate.md
+++ /dev/null
@@ -1,608 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's September Update
-author-id: richard
----
-
-# Cancellation in Beast/Asio and Better Compile Performance with Beast.Websocket
-
-This month I will be discussing two issues. One of interest to many people who come to us with questions on the
-[Github Issue Tracker](https://github.com/boostorg/beast/issues) and the #beast channel of
-[Cpplang Slack](https://cppalliance.org/slack/).
-
-## Compile Times and Separation of Concerns
-
-A common complaint about Boost.Beast is that compilation units that use the `websocket::stream` template class
-often take a long time to compile, and that because websocket::stream is a template, this compilation overhead can
-become viral in an application.
-
-This is a valid complaint and we believe there are some reasonable tradeoffs we can make by refactoring the websocket
-stream to use fewer templates internally. Vinnie has started work to express the WebSocket's
-intermediate completion handlers, buffer sequence and executor in terms of a polymorphic object. This would mean a
-few indirect jumps in the compiled code but would significantly reduce the number of internal template expansions.
-In the scheme of things, we don't believe that the virtual function calls will materially affect runtime performance.
-The branch is [here](https://github.com/vinniefalco/beast/tree/async-an)
-
-I will be continuing work in this area in the coming days.
-
-In the meantime, our general response is to suggest that users create a base class to handle the transport, and
-communicate important events such as frame received, connection state and the close notification to a derived
-application-layer class through a private polymorphic interface.
-
-In this way, the websocket transport compilation unit may take a while to compile, but it needs to be done only once
-since the transport layer will rarely change during the development life of an application. Whenever there is a change
-to the application layer, the transport layer is not affected so websocket-related code is not affected.
-
-This approach has a number of benefits. Not least of which is that developing another client implementation over
-a different websocket connection in the same application becomes trivial.
-
-Another benefit is that the application can be designed such that application-level concerns are agnostic of the
-transport mechanism. Such as when the server can be accessed by multiple means - WSS, WS, long poll, direct connection,
-unix sockets and so on.
-
-In this blog I will present a simplified implementation of this idea. My thanks to the cpplang Slack user `@elegracer`
-who most recently asked for guidance on reducing compile times. It was (his/her? Slack is silent on the matter) question
-which prompted me to finally conjure up a demo. `@elegracer`'s problem was needing to connect to multiple cryptocurrency
-exchanges in the same app over websocket. In this particular example I'll demonstrate a simplified connection to
-the public FMex market data feed since that was the subject of the original question.
-
-## Correct Cancellation
-
-Our examples in the Beast Repository are rudimentary and don't cover the issue of graceful shutdown of an application
-in response to a SIGINT (i.e. the user pressing ctrl-c). It is common for simple programs to exit suddenly in response
-to this signal, which is the default behaviour. For many applications, this is perfectly fine but not all. We may want
-active objects in the program to write data to disk, we may want to ensure that the underlying websocket is
-shut down cleanly and we may want to give the user an opportunity to prevent the shutdown.
-
-I will further annotate the example by providing this ability to prevent the shutdown. The user will have to confirm the
-first SIGINT with another within 5 seconds to confirm.
-
-# Designing the application
-
-When I write IO applications involving Asio and Beast, I prefer to create an "application" object. This has the
-responsibility of monitoring signals and starting the initial connection objects. It also provides the communication
-between the two.
-
-The construction and configuration of the `io_context` and `ssl::context` stay in `main()`. The executor and ssl context
-are passed to the application by reference as dependencies. The application can then pass on these references as
-required. It is also worth mentioning that I don't pass the io_context's executor as a polymorphic `any_io_executor`
-type at this stage. The reason is that I may want in future to upgrade my program to be multi-threaded. If I do this,
-then each individual io_enabled object such as a connection or the application will need to have its _own_ strand.
-Getting the strand out of an any_io_executor is not possible in the general case as it will have been type-erased, so
-for top level objects I pass the executor as `io_context::executor_type`. It is then up to each object to create its own
-strand internally which will have the type `strand`. The `strand` type provides the method
-`get_inner_executor` which allows the application to extract the underlying `io_context::executor_type` and pass it to
-the constructor of any subordinate but otherwise self-contained io objects. The subordinates can then build their own
-strands from this.
-
-## Step 1 - A Simple Application Framework That Supports ctrl-c
-
-OK, let's get started and build the framework. Here's a link to
-[step 1](https://github.com/test-scenarios/boost_beast_websocket_echo/tree/blog-2020-09-step-1/pre-cxx20/blog-2020-09).
-
-`ssl.hpp` and `net.hpp` simply configure the project to use boost.asio. The idea of these little configuration headers
-is that they could be generated by the cmake project if necessary to allow the option of upgrading to std networking
-if it ever arrives.
-
-As a matter of style, I like to ensure that no names are created in the global namespace other than `main`. This saves
-headaches that could occur if I wrote code on one platform, but then happened to port it to another where the name
-was already in use by the native system libraries.
-
-`main.cpp` simply creates the io execution context and a default ssl context, creates the application, starts it and
-runs the io context.
-
-At the moment, the only interesting part of our program is the `sigint_state`. This is a state machine which handles the
-behaviour of the program when a `SIGINT` is received. Our state machine is doing something a little fancy. Here is the
-state diagram:
-
-![sigint_state](/images/posts/richard/2020-09-sigint-state.png)
-
-Rather than reproduce the code here, please refer to
-[step 1](https://github.com/test-scenarios/boost_beast_websocket_echo/tree/blog-2020-09-step-1/pre-cxx20/blog-2020-09)
-to see the source code.
-
-At this point the program will run and successfully handle ctrl-c:
-
-```
-$ ./blog_2020_09
-Application starting
-Press ctrl-c to interrupt.
-^CInterrupt detected. Press ctrl-c again within 5 seconds to exit
-Interrupt unconfirmed. Ignoring
-^CInterrupt detected. Press ctrl-c again within 5 seconds to exit
-^CInterrupt confirmed. Shutting down
-```
-
-## Step 2 - Connecting to an Exchange
-
-Now we need to create our WebSocket transport class and our FMex exchange protocol class that will derive from it.
-For now we won't worry about cancellation - we'll retrofit that in Step 3.
-
-Here is the code for
-[step 2](https://github.com/test-scenarios/boost_beast_websocket_echo/tree/blog-2020-09-step-2/pre-cxx20/blog-2020-09).
-
-This section introduces two new main classes - the `wss_transport` and the `fmex_connection`. In addition, the connection
-phase of the wss_transport is expressed as a composed operation for exposition purposes (and in my opinion it actually
-makes the code easier to read than continuation-passing style code)
-
-Here is the implementation of the connect coroutine:
-
-```cpp
- struct wss_transport::connect_op : asio::coroutine
- {
- using executor_type = wss_transport::executor_type;
- using websock = wss_transport::websock;
-```
-Here we define the _implementation_ of the coroutine - this is an object which will not be moved for the duration of the
-execution of the coroutine. This address stability is important because intermediate asynchronous operations will rely
-on knowing the address of the resolver (and later perhaps other io objects).
-```cpp
- struct impl_data
- {
- impl_data(websock & ws,
- std::string host,
- std::string port,
- std::string target)
- : ws(ws)
- , resolver(ws.get_executor())
- , host(host)
- , port(port)
- , target(target)
- {
- }
-
- layer_0 &
- tcp_layer() const
- {
- return ws.next_layer().next_layer();
- }
-
- layer_1 &
- ssl_layer() const
- {
- return ws.next_layer();
- }
-
- websock & ws;
- net::ip::tcp::resolver resolver;
- net::ip::tcp::resolver::results_type endpoints;
- std::string host, port, target;
- };
-```
-The constructor merely forwards the arguments to the construction of the `impl_data`.
-```cpp
- connect_op(websock & ws,
- std::string host,
- std::string port,
- std::string target)
- : impl_(std::make_unique< impl_data >(ws, host, port, target))
- {
- }
-
-```
-This coroutine is both a composed operation and a completion handler for sub-operations. This means it must have an
-`operator()` interface matching the requirements of each sub-operation. During the lifetime of this coroutine we
-will be using the resolver and calling `async_connect` on the `tcp_stream`. We therefore provide conforming member
-functions which store or ignore the additional arguments and forward the `error_code` to the main implementation of the coroutine.
-```cpp
- template < class Self >
- void
- operator()(Self & self,
- error_code ec,
- net::ip::tcp::resolver::results_type results)
- {
- impl_->endpoints = results;
- (*this)(self, ec);
- }
-
- template < class Self >
- void
- operator()(Self &self, error_code ec, net::ip::tcp::endpoint const &)
- {
- (*this)(self, ec);
- }
-```
-Here is the main implementation of the coroutine. Note that the last two parameters provide defaults. This is in order
-to allow this member function to match the completion handler signatures of:
-* `void()` - invoked during async_compose in order to start the coroutine.
-* `void(error_code)` - invoked by the two functions above and by the async handshakes.
-* `void(error_code, std::size_t)` - invoked by operations such as async_read and async_write although not strictly
-necessary here.
-```cpp
- template < class Self >
- void operator()(Self &self, error_code ec = {}, std::size_t = 0)
- {
-```
-Note that here we are checking the error code before re-entering the coroutine. This is a shortcut which allows us to
-omit error checking after each sub-operation. This check will happen on every attempt to re-enter the coroutine,
-including the first entry (at which time `ec` is guaranteed to be default constructed).
-```cpp
- if (ec)
- return self.complete(ec);
-
- auto &impl = *impl_;
-```
-Note the use of the asio yield and unyield headers to create the fake 'keywords' `reenter` and `yield` in a very limited
-scope.
-```cpp
-#include <boost/asio/yield.hpp>
- reenter(*this)
- {
- yield impl.resolver.async_resolve(
- impl.host, impl.port, std::move(self));
-
- impl.tcp_layer().expires_after(15s);
- yield impl.tcp_layer().async_connect(impl.endpoints,
- std::move(self));
-
- if (!SSL_set_tlsext_host_name(impl.ssl_layer().native_handle(),
- impl.host.c_str()))
- return self.complete(
- error_code(static_cast< int >(::ERR_get_error()),
- net::error::get_ssl_category()));
-
- impl.tcp_layer().expires_after(15s);
- yield impl.ssl_layer().async_handshake(ssl::stream_base::client,
- std::move(self));
-
- impl.tcp_layer().expires_after(15s);
- yield impl.ws.async_handshake(
- impl.host, impl.target, std::move(self));
-```
-If the coroutine is re-entered here, it must be because there was no error (if there was an error, it would have been
-caught by the pre-reentry error check above). Since execution has resumed here in the completion handler of the
-`async_handshake` initiating function, we are guaranteed to be executing in the correct executor. Therefore we can
-simply call `complete` directly without needing to post to an executor. Note that the `async_compose` call which will
-encapsulate the use of this class embeds this object into a wrapper which provides the `executor_type` and
-`get_executor()` mechanism which asio uses to determine on which executor to invoke completion handlers.
-```cpp
- impl.tcp_layer().expires_never();
- yield self.complete(ec);
- }
-#include <boost/asio/unyield.hpp>
- }
-
- std::unique_ptr< impl_data > impl_;
- };
-```
-
-The `wss_connection` class provides the bare bones required to connect a websocket and maintain the connection. It
-provides a protected interface so that derived classes can send text frames and it will call private virtual functions
-in order to notify the derived class of:
-* transport up (websocket connection established).
-* frame received.
-* connection error (either during connection or operation).
-* websocket close - the server has requested or agreed to a graceful shutdown.
-
-Connection errors will only be notified once, and once a connection error has been indicated, no other event will reach
-the derived class.
-
-One of the many areas that trips up asio/beast beginners is that care must be taken to ensure that only one `async_write`
-is in progress at a time on the WebSocket (or indeed any async io object). For this reason we implement a simple
-transmit queue state which can be considered to be an orthogonal region (parallel task) to the read state.
-
-```cpp
- // send_state - data to control sending data
-
-    std::deque< std::string > send_queue_;
- enum send_state
- {
- not_sending,
- sending
- } send_state_ = not_sending;
-```
-
-You will note that I have used a `std::deque` to hold the pending messages. Although a deque has theoretically better
-complexity when inserting or removing items at the ends than a vector, this is not the reason for choosing this data
-structure. The actual reason is that items in a deque are guaranteed to have a stable address, even when other items
-are added or removed. This is useful as it means we don't have to move frames out of the transmit queue in order to
-send them. Remember that during an `async_write`, the data to which the supplied buffer sequence refers must have a
-stable address.
-
-Here are the functions that deal with the send state transitions.
-```cpp
- void
- wss_transport::send_text_frame(std::string frame)
- {
- if (state_ != connected)
- return;
-
- send_queue_.push_back(std::move(frame));
- start_sending();
- }
-
- void
- wss_transport::start_sending()
- {
- if (state_ == connected && send_state_ == not_sending &&
- !send_queue_.empty())
- {
- send_state_ = sending;
- websock_.async_write(net::buffer(send_queue_.front()),
- [this](error_code const &ec, std::size_t bt) {
- handle_send(ec, bt);
- });
- }
- }
-
- void
- wss_transport::handle_send(const error_code &ec, std::size_t)
- {
- send_state_ = not_sending;
-
- send_queue_.pop_front();
-
- if (ec)
- event_transport_error(ec);
- else
- start_sending();
- }
-```
-
-Finally, we can implement our specific exchange protocol on top of the `wss_connection`. In this case, FMex eschews
-the ping/pong built into websockets and requires a json ping/pong to be initiated by the client.
-
-```cpp
- void
- fmex_connection::ping_enter_state()
- {
- BOOST_ASSERT(ping_state_ == ping_not_started);
- ping_enter_wait();
- }
-
- void
- fmex_connection::ping_enter_wait()
- {
- ping_state_ = ping_wait;
-
- ping_timer_.expires_after(5s);
-
- ping_timer_.async_wait([this](error_code const &ec) {
- if (!ec)
- ping_event_timeout();
- });
- }
-
- void
- fmex_connection::ping_event_timeout()
- {
- ping_state_ = ping_waiting_pong;
-
- auto frame = json::value();
- auto &o = frame.emplace_object();
- o["cmd"] = "ping";
- o["id"] = "my_ping_ident";
- o["args"].emplace_array().push_back(timestamp());
- send_text_frame(json::serialize(frame));
- }
-
- void
- fmex_connection::ping_event_pong(json::value const &frame)
- {
- ping_enter_wait();
- }
-```
-
-Note that since we have implemented frame transmission in the base class in terms of a queue, the fmex class has no
-need to worry about ensuring the one-write-at-a-time rule. The base class handles it. This makes the application
-developer's life easy.
-
-Finally, we implement `on_text_frame` and write a little message parser and switch. Note that this function may throw.
-The base class will catch any exceptions thrown here and ensure that the `on_transport_error` event will be called at
-the appropriate time. Thus again, the application developer's life is improved as he doesn't need to worry about
-handling exceptions in an asynchronous environment.
-```cpp
- void
- fmex_connection::on_text_frame(std::string_view frame)
- try
- {
- auto jframe =
- json::parse(json::string_view(frame.data(), frame.size()));
-
- // dispatch on frame type
-
- auto &type = jframe.as_object().at("type");
- if (type == "hello")
- {
- on_hello();
- }
- else if (type == "ping")
- {
- ping_event_pong(jframe);
- }
- else if (type.as_string().starts_with("ticker."))
- {
- fmt::print(stdout,
- "fmex: tick {} : {}\n",
- type.as_string().subview(7),
- jframe.as_object().at("ticker"));
- }
- }
- catch (...)
- {
- fmt::print(stderr, "text frame is not json : {}\n", frame);
- throw;
- }
-```
-
-Compiling and running the program produces output similar to this:
-
-```
-Application starting
-Press ctrl-c to interrupt.
-fmex: initiating connection
-fmex: transport up
-fmex: hello
-fmex: tick btcusd_p : [1.0879E4,1.407E3,1.0879E4,2.28836E5,1.08795E4,1.13E2,1.0701E4,1.0939E4,1.0663E4,2.51888975E8,2.3378048830533768E4]
-fmex: tick btcusd_p : [1.08795E4,1E0,1.0879E4,3.79531E5,1.08795E4,3.518E3,1.0701E4,1.0939E4,1.0663E4,2.51888976E8,2.3378048922449758E4]
-fmex: tick btcusd_p : [1.0879E4,2E0,1.0879E4,3.7747E5,1.08795E4,7.575E3,1.0701E4,1.0939E4,1.0663E4,2.51888978E8,2.3378049106290182E4]
-fmex: tick btcusd_p : [1.0879E4,2E0,1.0879E4,3.77468E5,1.08795E4,9.229E3,1.0701E4,1.0939E4,1.0663E4,2.5188898E8,2.337804929013061E4]
-fmex: tick btcusd_p : [1.0879E4,1E0,1.0879E4,1.0039E4,1.08795E4,2.54203E5,1.0701E4,1.0939E4,1.0663E4,2.51888981E8,2.3378049382050827E4]
-```
-
-Note however, that although pressing ctrl-c is noticed by the application, the fmex feed does not shut down in response.
-This is because we have not wired up a mechanism to communicate the `stop()` event to the implementation of the
-connection:
-
-```
-$ ./blog_2020_09
-Application starting
-Press ctrl-c to interrupt.
-fmex: initiating connection
-fmex: transport up
-fmex: hello
-fmex: tick btcusd_p : [1.0859E4,1E0,1.0859E4,6.8663E4,1.08595E4,4.1457E4,1.07125E4,1.0939E4,1.0667E4,2.58585817E8,2.3968266005011003E4]
-^CInterrupt detected. Press ctrl-c again within 5 seconds to exit
-fmex: tick btcusd_p : [1.08595E4,2E0,1.0859E4,5.9942E4,1.08595E4,4.3727E4,1.07125E4,1.0939E4,1.0667E4,2.58585819E8,2.3968266189181537E4]
-^CInterrupt confirmed. Shutting down
-fmex: tick btcusd_p : [1.08595E4,2E0,1.0859E4,5.9932E4,1.08595E4,4.0933E4,1.07125E4,1.0939E4,1.0667E4,2.58585821E8,2.396826637335208E4]
-fmex: tick btcusd_p : [1.0859E4,1E0,1.0859E4,6.2722E4,1.08595E4,4.0943E4,1.07125E4,1.0939E4,1.0667E4,2.58585823E8,2.3968266557531104E4]
-fmex: tick btcusd_p : [1.08595E4,1.58E2,1.0859E4,6.2732E4,1.08595E4,3.7953E4,1.07125E4,1.0939E4,1.0667E4,2.58585981E8,2.3968281107003917E4]
-^Z
-[1]+ Stopped ./blog_2020_09
-$ kill %1
-
-[1]+ Stopped ./blog_2020_09
-$
-[1]+ Terminated ./blog_2020_09
-```
-
-## Step 3 - Re-Enabling Cancellation
-
-You will remember from step 1 that we created a little class called `sigint_state` which notices that the application
-has received a sigint and checks for a confirming sigint before taking action. We also added a slot to this to pass the
-signal to the fmex connection:
-
-```cpp
- fmex_connection_.start();
- sigint_state_.add_slot([this]{
- fmex_connection_.stop();
- });
-```
-
-But we didn't put any code in `wss_transport::stop`. Now all we have to do is provide a function object within
-`wss_transport` that we can adjust whenever the current state changes:
-
-```cpp
- // stop signal
-    std::function< void() > stop_signal_;
-```
-
-```cpp
- void
- wss_transport::stop()
- {
- net::dispatch(get_executor(), [this] {
- if (auto sig = boost::exchange(stop_signal_, nullptr))
- sig();
- });
- }
-```
-
-We will also need to provide a way for the connect operation to respond to the stop signal (the user might press
-ctrl-c while resolving for example).
-
-The way I have done this here is a simple approach, merely pass a reference to the `wss_transport` into the composed
-operation so that the operation can modify the function directly. There are other more scalable ways to do this, but
-this is good enough for now.
-
-The body of the coroutine then becomes:
-
-```cpp
- auto &impl = *impl_;
-
- if(ec)
- impl.error = ec;
-
- if (impl.error)
- return self.complete(impl.error);
-
-#include <boost/asio/yield.hpp>
- reenter(*this)
- {
- transport_->stop_signal_ = [&impl] {
- impl.resolver.cancel();
- impl.error = net::error::operation_aborted;
- };
- yield impl.resolver.async_resolve(
- impl.host, impl.port, std::move(self));
-
- //
-
- transport_->stop_signal_ = [&impl] {
- impl.tcp_layer().cancel();
- impl.error = net::error::operation_aborted;
- };
-
- impl.tcp_layer().expires_after(15s);
- yield impl.tcp_layer().async_connect(impl.endpoints,
- std::move(self));
-
- //
-
- if (!SSL_set_tlsext_host_name(impl.ssl_layer().native_handle(),
- impl.host.c_str()))
- return self.complete(
- error_code(static_cast< int >(::ERR_get_error()),
- net::error::get_ssl_category()));
-
- //
-
- impl.tcp_layer().expires_after(15s);
- yield impl.ssl_layer().async_handshake(ssl::stream_base::client,
- std::move(self));
-
- //
-
- impl.tcp_layer().expires_after(15s);
- yield impl.ws.async_handshake(
- impl.host, impl.target, std::move(self));
-
- //
-
- transport_->stop_signal_ = nullptr;
- impl.tcp_layer().expires_never();
- yield self.complete(impl.error);
- }
-#include <boost/asio/unyield.hpp>
-```
-
-The final source code for
-[step 3 is here](https://github.com/test-scenarios/boost_beast_websocket_echo/tree/blog-2020-09-step-3/pre-cxx20/blog-2020-09).
-
-Stopping the program while connecting:
-
-```
-$ ./blog_2020_09
-Application starting
-Press ctrl-c to interrupt.
-fmex: initiating connection
-^CInterrupt detected. Press ctrl-c again within 5 seconds to exit
-^CInterrupt confirmed. Shutting down
-fmex: transport error : system : 125 : Operation canceled
-```
-
-And stopping the program while connected:
-
-```
-$ ./blog_2020_09
-Application starting
-Press ctrl-c to interrupt.
-fmex: initiating connection
-fmex: transport up
-fmex: hello
-^CInterrupt detected. Press ctrl-c again within 5 seconds to exit
-fmex: tick btcusd_p : [1.0882E4,1E0,1.0882E4,3.75594E5,1.08825E4,5.103E3,1.07295E4,1.0939E4,1.06785E4,2.58278146E8,2.3907706652603207E4]
-^CInterrupt confirmed. Shutting down
-closing websocket
-fmex: closed
-```
-
-# Future development
-
-Next month I'll refactor the application to use C++20 coroutines and we can see whether this makes developing
-event based systems easier and/or more maintainable.
-
-Thanks for reading.
diff --git a/_posts/2020-1-30-Gold-sponsor-of-C++-now.md b/_posts/2020-1-30-Gold-sponsor-of-C++-now.md
deleted file mode 100644
index 862bbdde9..000000000
--- a/_posts/2020-1-30-Gold-sponsor-of-C++-now.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: company, louis
-author-id: louis
-title: Gold sponsor of C++Now 2020
----
-The Alliance is a Gold sponsor for
-C++Now 2020. This
-conference is a gathering of C++ experts and enthusiasts from around
-the world in beautiful Aspen, Colorado from May 3, 2020 - May 8, 2020.
diff --git a/_posts/2020-10-31-RichardsOctoberUpdate.md b/_posts/2020-10-31-RichardsOctoberUpdate.md
deleted file mode 100644
index 8acf62c4e..000000000
--- a/_posts/2020-10-31-RichardsOctoberUpdate.md
+++ /dev/null
@@ -1,906 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's October Update
-author-id: richard
----
-
-# Asio Coroutines in Qt applications!
-
-I started this train of thought when I wanted to hook up some back-end style code that I had written to a gui front end.
-One way to do this would be to have a web front end subscribing to a back-end service, but I am no expert in modern web
-technologies so rather than spend time learning something that wasn't C++ I decided to reach for the
-popular-but-so-far-unused-by-me C++ GUI framework, Qt.
-
-The challenge was how to hook up Qt, which is an event driven framework to a service written with Asio C++ coroutines.
-
-In the end it turned out to be easier than I had expected. Here's how.
-
-## A simple Executor
-
-As mentioned in a previous blog, Asio comes with a full implementation of the
-[Unified Executors proposal](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p0443r12.html). Asio coroutines
-are designed to be initiated and continued within an executor's execution context. So let's build an executor that will
-perform work in a Qt UI thread.
-
-The executor I am going to build will have to invoke completion handlers to Asio IO objects, so we need to make it
-compatible with `asio::any_io_executor`. This means it needs to have an associated
-[execution context](https://www.boost.org/doc/libs/1_74_0/doc/html/boost_asio/reference/execution_context.html).
-
-The execution context is going to ultimately perform work on a Qt Application, so it makes sense to capture a reference
-to the Application. Although Qt defines the macro `qApp` to resolve to a pointer to the "current" application, for
-testing and sanity purposes I prefer that all services I write allow dependency injection, so I'll arrange things so
-that the execution_context's constructor takes an optional pointer to an application. In addition, it will be convenient
-when writing components to not have to specifically create and pass an execution context to windows within the Qt
-application so it makes sense to be able to provide access to a default context which references the default application.
-Here's a first cut:
-
-```cpp
-struct qt_execution_context : net::execution_context
- , boost::noncopyable
-{
- qt_execution_context(QApplication *app = qApp)
- : app_(app)
- {
- instance_ = this;
- }
-
- template
- void
- post(F f)
- {
- // todo
- }
-
- static qt_execution_context &
- singleton()
- {
- assert(instance_);
- return *instance_;
- }
-
-private:
- static qt_execution_context *instance_;
- QApplication *app_;
-};
-```
-
-This class will provide two services. The first is to provide the asio service infrastructure so that we can create
-timers, sockets etc that use executors associated with this context and the second is to allow the executor to actually
-dispatch work in a Qt application. This is the purpose of the `post` method.
-
-Now a Qt application is itself a kind of execution context - in that it dispatches QEvent objects to be handled by
-children of the application. We can use this infrastructure to ensure that work dispatched by this execution context
-actually takes place on the correct thread and at the correct time.
-
-In order for us to dispatch work to the application, we need to wrap our function into a QEvent:
-
-```cpp
-class qt_work_event_base : public QEvent
-{
-public:
- qt_work_event_base()
- : QEvent(generated_type())
- {
- }
-
- virtual void
- invoke() = 0;
-
- static QEvent::Type
- generated_type()
- {
- static int event_type = QEvent::registerEventType();
-        return static_cast<QEvent::Type>(event_type);
- }
-};
-
-template
-struct basic_qt_work_event : qt_work_event_base
-{
- basic_qt_work_event(F f)
- : f_(std::move(f))
- {}
-
- void
- invoke() override
- {
- f_();
- }
-
-private:
- F f_;
-};
-```
-As opposed to using a `std::function`, the `basic_qt_work_event` allows us to wrap a move-only function object, which is
-important when that object is actually an Asio completion handler. Completion handlers benefit from being move-only as
-it means they can carry move-only state. This makes them more versatile, and can often lead to improvements in
-execution performance.
-
-Now we just need to fill out the code for `qt_execution_context::post` and provide a mechanism in the Qt application to
-detect and dispatch these messages:
-
-```cpp
- template
- void
- post(F f)
- {
- // c++20 auto template deduction
- auto event = new basic_qt_work_event(std::move(f));
- QApplication::postEvent(app_, event);
- }
-```
-
-```cpp
-class qt_net_application : public QApplication
-{
- using QApplication::QApplication;
-
-protected:
- bool
- event(QEvent *event) override;
-};
-
-bool
-qt_net_application::event(QEvent *event)
-{
- if (event->type() == qt_work_event_base::generated_type())
- {
-        auto p = static_cast<qt_work_event_base *>(event);
- p->accept();
- p->invoke();
- return true;
- }
- else
- {
- return QApplication::event(event);
- }
-}
-```
-
-Note that I have seen on stack overflow the technique of invoking a function object in the destructor of the
-`QEvent`-derived event. This would mean no necessity of custom event handling in the `QApplication` but there are two
-problems that I can see with this approach:
-1. I don't know enough about Qt to know that this is safe and correct, and
-2. Executors-TS executors can be destroyed while there are still un-invoked handlers within them. The correct behaviour
-is to destroy these handlers without invoking them. If we put invocation code in the destructors, they will actually
-mass-invoke when the executor is destroyed, leading most probably to annihilation of our program by segfault.
-
-However, that being done, we can now write the executor to meet the minimal expectations of an asio executor which can
-be used in an `any_io_executor`.
-
-```cpp
-struct qt_executor
-{
- qt_executor(qt_execution_context &context = qt_execution_context::singleton()) noexcept
- : context_(std::addressof(context))
- {
- }
-
- qt_execution_context &query(net::execution::context_t) const noexcept
- {
- return *context_;
- }
-
- static constexpr net::execution::blocking_t
- query(net::execution::blocking_t) noexcept
- {
- return net::execution::blocking.never;
- }
-
- static constexpr net::execution::relationship_t
- query(net::execution::relationship_t) noexcept
- {
- return net::execution::relationship.fork;
- }
-
- static constexpr net::execution::outstanding_work_t
- query(net::execution::outstanding_work_t) noexcept
- {
- return net::execution::outstanding_work.tracked;
- }
-
- template < typename OtherAllocator >
- static constexpr auto query(
- net::execution::allocator_t< OtherAllocator >) noexcept
- {
- return std::allocator();
- }
-
- static constexpr auto
- query(net::execution::allocator_t< void >) noexcept
- {
- return std::allocator();
- }
-
- template
- void
- execute(F f) const
- {
- context_->post(std::move(f));
- }
-
- bool
- operator==(qt_executor const &other) const noexcept
- {
- return context_ == other.context_;
- }
-
- bool
- operator!=(qt_executor const &other) const noexcept
- {
- return !(*this == other);
- }
-
-private:
- qt_execution_context *context_;
-};
-
-
-static_assert(net::execution::is_executor_v);
-```
-
-Now all that remains is to write a subclass of some Qt Widget so that we can dispatch some work against it.
-
-```cpp
-class test_widget : public QTextEdit
-{
- Q_OBJECT
-public:
- using QTextEdit::QTextEdit;
-
-private:
- void
- showEvent(QShowEvent *event) override;
-
- void
- hideEvent(QHideEvent *event) override;
-
- net::awaitable
- run_demo();
-};
-
-void
-test_widget::showEvent(QShowEvent *event)
-{
- net::co_spawn(
- qt_executor(), [this] {
- return run_demo();
- },
- net::detached);
-
- QTextEdit::showEvent(event);
-}
-
-void
-test_widget::hideEvent(QHideEvent *event)
-{
- QWidget::hideEvent(event);
-}
-
-net::awaitable
-test_widget::run_demo()
-{
- using namespace std::literals;
-
- auto timer = net::high_resolution_timer(co_await net::this_coro::executor);
-
- for (int i = 0; i < 10; ++i)
- {
- timer.expires_after(1s);
- co_await timer.async_wait(net::use_awaitable);
- this->setText(QString::fromStdString(std::to_string(i + 1) + " seconds"));
- }
- co_return;
-}
-
-```
-
-Here is the code for [stage 1](https://github.com/madmongo1/blog-october-2020/tree/stage-1)
-
-And here is a screenshot of the app running:
-
-![app running](/images/posts/richard/2020-october/stage-1.png)
-
-## All very well...
-
-OK, so we have a coroutine running in a Qt application. This is nice because it allows us to express an event-driven
-system in terms of procedural expression of code in a coroutine.
-
-But what if the user closes the window before the coroutine completes?
-
-This application has created the window on the stack, but in a larger application, there will be multiple windows and
-they may open and close at any time. It is not unusual in Qt to delete a closed window. If the coroutine continues to
-run once the window that's hosting it is deleted, we are sure to get a segfault.
-
-One answer to this is to maintain a sentinel in the Qt widget implementation, which prevents the continuation of the
-coroutine if destroyed. A `std::shared_ptr/weak_ptr` pair would seem like a sensible solution. Let's create an updated
-version of the executor:
-
-```cpp
-struct qt_guarded_executor
-{
- qt_guarded_executor(std::weak_ptr guard,
- qt_execution_context &context
- = qt_execution_context::singleton()) noexcept
- : context_(std::addressof(context))
- , guard_(std::move(guard))
- {}
-
- qt_execution_context &query(net::execution::context_t) const noexcept
- {
- return *context_;
- }
-
- static constexpr net::execution::blocking_t
- query(net::execution::blocking_t) noexcept
- {
- return net::execution::blocking.never;
- }
-
- static constexpr net::execution::relationship_t
- query(net::execution::relationship_t) noexcept
- {
- return net::execution::relationship.fork;
- }
-
- static constexpr net::execution::outstanding_work_t
- query(net::execution::outstanding_work_t) noexcept
- {
- return net::execution::outstanding_work.tracked;
- }
-
- template
- static constexpr auto
- query(net::execution::allocator_t) noexcept
- {
- return std::allocator();
- }
-
- static constexpr auto query(net::execution::allocator_t) noexcept
- {
- return std::allocator();
- }
-
- template
- void
- execute(F f) const
- {
- if (auto lock1 = guard_.lock())
- {
- context_->post([guard = guard_, f = std::move(f)]() mutable {
- if (auto lock2 = guard.lock())
- f();
- });
- }
- }
-
- bool
- operator==(qt_guarded_executor const &other) const noexcept
- {
- return context_ == other.context_ && !guard_.owner_before(other.guard_)
- && !other.guard_.owner_before(guard_);
- }
-
- bool
- operator!=(qt_guarded_executor const &other) const noexcept
- {
- return !(*this == other);
- }
-
-private:
- qt_execution_context *context_;
- std::weak_ptr guard_;
-};
-```
-
-Now we'll make a little boilerplate class that we can use as a base class in any executor-enabled object in Qt:
-
-```cpp
-struct has_guarded_executor
-{
- using executor_type = qt_guarded_executor;
-
- has_guarded_executor(qt_execution_context &ctx
- = qt_execution_context::singleton())
- : context_(std::addressof(ctx))
- {
- new_guard();
- }
-
- void
- new_guard()
- {
- static int x = 0;
- guard_ = std::shared_ptr(std::addressof(x),
- // no-op deleter
- [](auto *) {});
- }
-
- void
- reset_guard()
- {
- guard_.reset();
- }
-
- executor_type
- get_executor() const
- {
- return qt_guarded_executor(guard_, *context_);
- }
-
-private:
- qt_execution_context *context_;
- std::shared_ptr guard_;
-};
-```
-
-And we can modify the `test_widget` to use it:
-
-```cpp
-class test_widget
- : public QTextEdit
- , public has_guarded_executor
-{
- ...
-};
-
-void
-test_widget::showEvent(QShowEvent *event)
-{
- // stop all existing coroutines and create a new guard
- new_guard();
-
- // start our coroutine
- net::co_spawn(
- get_executor(), [this] { return run_demo(); }, net::detached);
-
- QTextEdit::showEvent(event);
-}
-
-void
-test_widget::hideEvent(QHideEvent *event)
-{
- // stop all coroutines
- reset_guard();
- QWidget::hideEvent(event);
-}
-```
-
-Now we'll update the application to allow the creation and deletion of our widget. For this I'll use the QMdiWindow
-and add a menu with an action to create new widgets.
-
-We are now able to create and destroy widgets at will, with no segfaults.
-
-![MDI app running](/images/posts/richard/2020-october/stage-2.png)
-
-If you look at the code, you'll also see that I've wired up a rudimentary signal/slot device to allow the coroutine to
-be cancelled early.
-
-```cpp
- // test_widget.hpp
-
- void
- listen_for_stop(std::function slot);
-
- void
- stop_all();
-
- std::vector> stop_signals_;
- bool stopped_ = false;
-
- // test_widget.cpp
-
- void
- test_widget::listen_for_stop(std::function slot)
- {
- if (stopped_)
- return slot();
-
- stop_signals_.push_back(std::move(slot));
- }
-
- void
- test_widget::stop_all()
- {
- stopped_ = true;
- auto copy = std::exchange(stop_signals_, {});
- for (auto &slot : copy) slot();
- }
-
- void
- test_widget::closeEvent(QCloseEvent *event)
- {
- stop_all();
- QWidget::closeEvent(event);
- }
-
- net::awaitable
- test_widget::run_demo()
- {
- using namespace std::literals;
-
- auto timer = net::high_resolution_timer(co_await net::this_coro::executor);
-
- auto done = false;
-
- listen_for_stop([&] {
- done = true;
- timer.cancel();
- });
-
- while (!done)
- {
- for (int i = 0; i < 10; ++i)
- {
- timer.expires_after(1s);
- auto ec = boost::system::error_code();
- co_await timer.async_wait(
- net::redirect_error(net::use_awaitable, ec));
- if (ec)
- {
- done = true;
- break;
- }
- this->setText(
- QString::fromStdString(std::to_string(i + 1) + " seconds"));
- }
-
- for (int i = 10; i--;)
- {
- timer.expires_after(250ms);
- auto ec = boost::system::error_code();
- co_await timer.async_wait(
- net::redirect_error(net::use_awaitable, ec));
- if (ec)
- {
- done = true;
- break;
- }
- this->setText(QString::fromStdString(std::to_string(i)));
- }
- }
- co_return;
- }
-
-```
-
-Apparently I am told that it's been a long-believed myth that Asio "doesn't do cancellation". This is of course,
-nonsense.
-
-Here's the code for [stage 2](https://github.com/madmongo1/blog-october-2020/tree/stage-2)
-
-## State of the Art
-
-It's worth mentioning that I wrote and tested this demo using clang-9 and the libc++ version of the standard library.
-I have also successfully tested clang-11 with coroutines (and concepts). As I understand it, recent versions of
-Visual Studio support both well. GCC 10 - although advertising support for coroutines - has given me trouble, exhibiting
-segfaults at run time.
-
-Apple Clang, of course, is as always well behind the curve with no support for coroutines. If you want to try this code
-on a mac, it's entirely possible as long as you ditch the Apple compiler and use the homebrew's clang:
-```
-brew install llvm
-```
-Clang will then be available in `/usr/local/opt/bin` and you will need to set your `CMAKE_CXX_COMPILER` CMake variable
-appropriately. For completeness, it's worth mentioning that I also installed Qt5 using homebrew. You will need to
-set `Qt5_DIR`. Something like this:
-
- ```
- cmake -H. -Bmy_build_dir -DCMAKE_CXX_COMPILER=/usr/local/opt/llvm/clang++ -DQt5_DIR=/usr/local/opt/qt5/lib/cmake/Qt5
-```
-
-### Going further
-
-Ok, so what if we want our Qt application to interact with some asio-based service running in another thread?
-
-For this I'm going to create a few boilerplate classes. The reason is that we're going to have multiple threads running
-and each thread is going to be executing multiple coroutines. Each coroutine has an associated executor and that
-executor is dispatching completion handlers (which for our purposes advance the progress of the coroutines) in one of
-the threads assigned to it.
-
-It is important that coroutines are able to synchronise with each other, similar to the way that threads synchronise
-with each other.
-
-In fact, it's reasonable to use the mental model that a coroutine is a kind of "thread".
-
-In standard C++, we have the class `std::condition_variable` which we can wait on for some condition to be fulfilled.
-If we were to produce a similar class for coroutines, then coroutines could co_await on each other. This could form the
-basis of an asynchronous event queue.
-
-First the condition_variable, implemented in terms of cancellation of an Asio timer to indicate readiness (thanks
-to Chris Kohlhoff - the author of Asio - for suggesting this and saving me having to reach for another library or worse,
-write my own awaitable type!):
-
-```cpp
-struct async_condition_variable
-{
-private:
- using timer_type = net::high_resolution_timer;
-
-public:
- using clock_type = timer_type::clock_type;
- using duration = timer_type::duration;
- using time_point = timer_type::time_point;
- using executor_type = timer_type::executor_type;
-
- /// Constructor
- /// @param exec is the executor to associate with the internal timer.
- explicit inline async_condition_variable(net::any_io_executor exec);
-
- template
- [[nodiscard]]
- auto
- wait(Pred pred) -> net::awaitable;
-
- template
- [[nodiscard]]
- auto
- wait_until(Pred pred, time_point limit) -> net::awaitable;
-
- template
- [[nodiscard]]
- auto
- wait_for(Pred pred, duration d) -> net::awaitable;
-
- auto
- get_executor() noexcept -> executor_type
- {
- return timer_.get_executor();
- }
-
- inline void
- notify_one();
-
- inline void
- notify_all();
-
- /// Put the condition into a stop state so that all future awaits fail.
- inline void
- stop();
-
- auto
- error() const -> error_code const &
- {
- return error_;
- }
-
- void
- reset()
- {
- error_ = {};
- }
-
-private:
- timer_type timer_;
- error_code error_;
- std::multiset wait_times_;
-};
-
-template
-auto
-async_condition_variable::wait_until(Pred pred, time_point limit)
- -> net::awaitable
-{
- assert(co_await net::this_coro::executor == timer_.get_executor());
-
- while (not error_ and not pred())
- {
- if (auto now = clock_type::now(); now >= limit)
- co_return std::cv_status::timeout;
-
- // insert our expiry time into the set and remember where it is
- auto where = wait_times_.insert(limit);
-
- // find the nearest expiry time and set the timeout for that one
- auto when = *wait_times_.begin();
- if (timer_.expiry() != when)
- timer_.expires_at(when);
-
- // wait for timeout or cancellation
- error_code ec;
- co_await timer_.async_wait(net::redirect_error(net::use_awaitable, ec));
-
- // remove our expiry time from the set
- wait_times_.erase(where);
-
- // any error other than operation_aborted is unexpected
- if (ec and ec != net::error::operation_aborted)
- if (not error_)
- error_ = ec;
- }
-
- if (error_)
- throw system_error(error_);
-
- co_return std::cv_status::no_timeout;
-}
-
-template
-auto
-async_condition_variable::wait(Pred pred) -> net::awaitable
-{
- auto stat = co_await wait_until(std::move(pred), time_point::max());
- boost::ignore_unused(stat);
- co_return;
-}
-
-template
-auto
-async_condition_variable::wait_for(Pred pred, duration d)
- -> net::awaitable
-{
- return wait_until(std::move(pred), clock_type::now() + d);
-}
-
-async_condition_variable::async_condition_variable(net::any_io_executor exec)
- : timer_(std::move(exec))
- , error_()
-{}
-
-void
-async_condition_variable::notify_one()
-{
- timer_.cancel_one();
-}
-
-void
-async_condition_variable::notify_all()
-{
- timer_.cancel();
-}
-
-void
-async_condition_variable::stop()
-{
- error_ = net::error::operation_aborted;
- notify_all();
-}
-```
-
-For our purposes this one is a little too all-singing and all-dancing as it allows for timed waits from multiple
-coroutines. This is not needed in our example, but I happened to have the code handy from previous experiments.
-You will notice that I have marked the coroutines as `[[nodiscard]]`. This is to ensure that I don't forget to
-`co_await` them at the call site. I can't tell you how many times I have done that and then wondered why my program
-mysteriously freezes mid run.
-
-Having built the condition_variable, we now need some kind of waitable queue. I have implemented this in terms of some
-shared state which contains an `async_condition_variable` and some kind of queue. I have made the implementation of the
-queue a template function (another over-complication for our purposes). The template represents the strategy for
-accumulating messages before they have been consumed by the client. The strategy I have used here is a FIFO, which means
-that every message posted will be consumed in the order in which they were posted. But it could just as easily be a
-priority queue, or a latch - i.e. only storing the most recent message.
-
-The code to describe this machinery is a little long to put inline, but by all means look at the code:
-- [basic_connection](https://github.com/madmongo1/blog-october-2020/blob/stage-3/src/basic_connection.hpp)
-- [basic_distributor](https://github.com/madmongo1/blog-october-2020/blob/stage-3/src/basic_distributor.hpp)
-- [basic_shared_state](https://github.com/madmongo1/blog-october-2020/blob/stage-3/src/basic_shared_state.hpp)
-
-The next piece of machinery we need is the actual service that will be delivering messages. The code is more-or-less
-a copy/paste of the code that was in our widget because it's doing the same job - delivering messages, but this time
-via the basic_distributor.
-
-- [message_service.hpp](https://github.com/madmongo1/blog-october-2020/blob/stage-3/src/message_service.hpp)
-- [message_service.cpp](https://github.com/madmongo1/blog-october-2020/blob/stage-3/src/message_service.cpp)
-
-Note that the message_service class is a pimpl. Although it uses a shared_ptr to hold the impl's lifetime, it is itself
-non-copyable. When the message_service is destroyed, it will signal its impl to stop. The impl will last a little longer
-than the handle, while it shuts itself down.
-
-The main coroutine on the impl is called `run()` and it is initiated when the impl is created:
-
-```cpp
-message_service::message_service(const executor_type &exec)
- : exec_(exec)
- , impl_(std::make_shared(exec_))
-{
- net::co_spawn(
- impl_->get_executor(),
- [impl = impl_]() -> net::awaitable { co_await impl->run(); },
- net::detached);
-}
-```
-Note that the `impl` shared_ptr has been captured in the lambda. Normally we'd need to be careful here because the
-lambda is just a class whose `operator()` happens to be a coroutine. This means that the actual coroutine can outlive the
-lambda that initiated it, which means that `impl` could be destroyed before the coroutine finishes. For this reason
-it's generally safer to pass the impl to the coroutine as an argument, so that it gets decay_copied into the
-coroutine state.
-However, in this case we're safe. `net::co_spawn` will actually copy the lambda object before invoking it, guaranteeing
-- with asio at least - that the impl will survive the execution of the coroutine.
-
-And here's the `run()` coroutine:
-
-```cpp
-net::awaitable
-message_service_impl::run()
-{
- using namespace std::literals;
-
- auto timer
- = net::high_resolution_timer(co_await net::this_coro::executor);
-
- auto done = false;
-
- listen_for_stop([&] {
- done = true;
- timer.cancel();
- });
-
- while (!done)
- {
- for (int i = 0; i < 10 && !done; ++i)
- {
- timer.expires_after(1s);
- auto ec = boost::system::error_code();
- co_await timer.async_wait(
- net::redirect_error(net::use_awaitable, ec));
- if (ec)
- break;
- message_dist_.notify_value(std::to_string(i + 1) + " seconds");
- }
-
- for (int i = 10; i-- && !done;)
- {
- timer.expires_after(250ms);
- auto ec = boost::system::error_code();
- co_await timer.async_wait(
- net::redirect_error(net::use_awaitable, ec));
- if (ec)
- break;
- message_dist_.notify_value(std::to_string(i));
- }
- }
-}
-```
-Notice the `done` machinery allowing detection of a stop event. Remember that a stop event can arrive at any time. The
-first this coroutine will hear of it is when one of the timer `async_wait` calls is canceled. Note that the lambda
-passed to `listen_for_stop` _is not actually part of the coroutine_. It is a separate function that just happens to
-refer to the same state that the coroutine refers to. The communication between the two is via the timer cancellation
-and the `done` flag. This communication is guaranteed not to race because both the coroutine and the lambda are executed
-by the same `strand`.
-
-Finally we need to modify the widget:
-
-```cpp
-net::awaitable
-test_widget::run_demo()
-{
- using namespace std::literals;
-
- auto service = message_service(ioexec_);
- auto conn = co_await service.connect();
-
- auto done = false;
-
- listen_for_stop([&] {
- done = true;
- conn.disconnect();
- service.reset();
- });
-
- while (!done)
- {
- auto message = co_await conn.consume();
- this->setText(QString::fromStdString(message));
- }
- co_return;
-}
-```
-
-This coroutine will exit via exception when the distributor feeding the connection is destroyed. This will happen when
-the impl of the service is destroyed.
-
-Here is the final code for [stage 3](https://github.com/madmongo1/blog-october-2020/tree/stage-3).
-
-I've covered quite a few topics here and I hope this has been useful and interesting for people interested in exploring
-coroutines and the think-async mindset.
-
-There are a number of things I have not covered, the most important of which is improving the (currently very basic)
-`qt_guarded_executor` to improve its performance. At the present time, whether you call `dispatch` or `post` referencing
-this executor type, a post will actually be performed. Perhaps next month I'll revisit and add the extra machinery to
-allow `net::dispatch(e, f)` to offer straight-through execution if we're already on the correct Qt thread.
-
-If you have any questions or suggestions I'm happy to hear them. You can generally find me in the `#beast` channel
-on [cpplang slack](https://cppalliance.org/slack/) or if you prefer you can either email [me](mailto:hodges.r@gmail.com)
-or create an issue on [this repo](https://github.com/madmongo1/blog-october-2020/issues).
diff --git a/_posts/2020-12-22-RichardsDecemberUpdate.md b/_posts/2020-12-22-RichardsDecemberUpdate.md
deleted file mode 100644
index 62db8804e..000000000
--- a/_posts/2020-12-22-RichardsDecemberUpdate.md
+++ /dev/null
@@ -1,475 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's November/December Update
-author-id: richard
----
-
-# A Coroutine Websocket Using Boost Beast
-
-This month I thought I would present a little idea that I had a few months ago.
-
-Boost.Beast is a very comprehensive and competent websocket implementation, but it is not what you might call
-"straightforward" to use unless you are already wise in the ways of Asio.
-
-Beast's documentation and design makes no apology for this. There is a disclaimer in the
-[documentation](https://www.boost.org/doc/libs/1_75_0/libs/beast/doc/html/beast/using_io/asio_refresher.html):
-> To use Beast effectively, a prior understanding of Networking is required.
-
-This is worth taking seriously (and if you are not fully aware of how Asio works with respect to the posting of
-completion handlers onto the associated executor, this page is worth studying).
-
-## The Interface
-
-I wanted to model my websocket object's interface roughly on the javascript websocket connection interface. There will
-be a few differences of course, because the javascript version uses callbacks (or continuations) and I will be using
-C++ coroutines that execute on an Asio executor. In underlying implementation, these concepts are not actually that far
-apart, since Asio awaitables are actually implemented in terms of the normal asio completion token/handler interaction.
-
-Furthermore, I want my WebSocket's connection phase to be cancellable.
-
-My websocket interface will look something like this:
-
-```cpp
-namespace websocket
-{
- struct message
- {
- bool is_binary() const;
- bool is_text() const;
- std::string_view operator*() const;
- };
-
- struct event
- {
- bool is_error() const;
- bool is_message() const;
- error_code const& error() const;
- message const& message() const;
- };
-
- struct connection
- {
- /// Schedule a frame to be sent at some point in the near future
- void
- send(std::string_view data, bool as_text = true);
-
- /// Suspend and wait until either there is a message available or an error
- net::awaitable
- consume();
-
- /// Close the websocket and suspend until it is closed.
- net::awaitable
- close(beast::websocket::close_reason = /* a sensible default */);
-
- /// Send the close frame to the server but don't hang around to wait
- /// for the confirmation.
- void
- drop(beast::websocket::close_reason = /* a sensible default */);
-
- /// If consume() exits with an error of beast::websocket::error::closed then this
- /// will return the reason for the closure as sent by the server.
- /// Otherwise undefined.
- beast::websocket::close_reason
- reason() const;
- };
-
- net::awaitable
- connect(std::string url,
- connect_options options /* = some default */);
-}
-```
-
-The idea here is to keep the interface as lightweight and as simple as possible. The websocket connection will run on
-the executor of the coroutine that created it. Any commands sent to the websocket will be executor safe. That is,
-internally their work will be dispatched to the websocket connection's executor. The exception to this will be the
-close_reason method, which must only be called once the connection's consume coroutine has returned an error event.
-It is a guarantee that once `consume` returns an event that is an error, it will never return anything else, and no
-other method on the connection will mutate its internal state. In this condition, it is legal to call the `reason`
-method.
-
-A typical use would look like this:
-
-```cpp
- // default options
- auto ws = co_await websocket::connect("wss://echo.websocket.org");
-
- for(;;)
- {
- auto event = co_await ws.consume();
- if (event.is_error())
- break;
- else
- on_message(std::move(event.message()));
- }
-```
-
-The above code example does not provide any means to write to the websocket. But it would be trivial to either spawn
-another coroutine to handle the writer, or call a function in order to signal some orthogonal process that the websocket
-was ready.
-
-```cpp
- // default options
- auto ws = co_await websocket::connect("wss://echo.websocket.org");
-
- on_connected(ws); // The websocket object should be shared-copyable
-
- for(;;)
- {
- auto event = co_await ws.consume();
- if (event.is_error()) {
- on_close();
- break;
- }
- else
- on_message(std::move(event.message()));
- }
-```
-
-Another way to visualise a websocket is exactly as javascript's websocket connection does, using callbacks or
-continuations in order to notify user code that the websocket has received data or closed. It would be trivial to wrap
-our coroutine version in order to provide this functionality. We would need to spawn a coroutine in order to run
-the `consume()` loop and then somehow signal it to stop if the websocket was disposed of.
-
-User code might then start to look something like this:
-
-```cpp
- websocket::connect("wss://echo.websocket.org", options)
- .on_connect([](websocket::connection ws)
- {
- run_my_loop(ws);
- });
-
-void run_my_loop(websocket::connection ws)
-{
- bool closed = false;
- ws.on_close([&]{ closed = true; });
- ws.on_error([&]{ closed = true; });
- ws.on_message([&](websocket::message msg){ process_message(msg); });
-
- // some time later
- ws.send("Hello, World!");
-}
-```
-
-With this style of interface we would need some means of passing the executor on which the continuations would be
-invoked. A reasonable place to do this might be the `options` parameter.
-
-In the JavaScript style interface, it would be important to be able to detect when the websocket has gone out of scope
-and ensure that it closes correctly, otherwise we'll have a rogue resource out there with no handle by which we can
-close it. This argues that the actual `websocket::connection` class should be a handle to an internal implementation
-and that the destructor of the handle should ensure that the implementation is signalled so that it can `drop` the
-connection and shutdown cleanly. Under the covers, we're implementing this websocket in Boost.Beast. As with all Asio
-objects, there could be (probably will be) asynchronous operations in progress at the time the websocket handle goes
-out of scope.
-
-Thinking this through, it means that:
- - The implementation is going to live longer than the lifetime of the last copy of the handle owning the implementation.
- - There needs to be some mechanism to cancel the underlying implementation's operations.
-
-Coroutines can be visualised as threads of execution. In the world of threads (e.g. `std::thread`) we have primitives
-such as `std::stop_token` and `std::condition_variable`. The C++ Standard Library does not yet have these primitives
-for coroutines. And if it did it would be questionable whether they would be suitable for networking code where
-coroutines are actually built on top of Asio composed operations. Does Asio itself provide anything we can use?
-
-## Asio's Hidden Asynchronous condition_variable
-
-The answer is surprisingly, yes. But not in the form I was expecting when I asked Chris Kohlhoff (Asio's author and
-maintainer) about it. It turns out that asio's timer models an asynchronous version of a condition variable perfectly.
-Consider:
-
-Given:
-```cpp
-auto t = net::steady_timer(co_await net::this_coro::executor);
-t.expires_at(std::chrono::stready_clock::time_point::max());
-```
-
-Then we can write:
-
-```cpp
-template
-net::awaitable
-wait(net::steady_timer& t, Pred predicate)
-{
- error_code ec;
- while(!ec && !predicate())
- {
- co_await t.async_wait(net::redirect_error(net::use_awaitable, ec));
- if (ec == net::error::operation_aborted)
- // timer cancelled
- continue;
- else
- throw std::logic_error("unexpected error");
- }
-}
-
-void
-notify(net::steady_timer& t)
-{
- // assuming we are executing on the same executor as the wait()
- t.cancel();
-}
-```
-
-Which gives us a simple asynchronous condition_variable (this one does not implement timeouts, but it would be trivial
-to extend this code to accommodate them).
-
-## Asynchronous Stop Token
-
-The `std::stop_token` is a welcome addition to the standard, but it is a little heavyweight for asynchronous code that
-runs in an executor, which is already thread-safe by design. A simple in-executor stop source can be implemented
-something like this:
-
-```cpp
-namespace async {
-namespace detail {
-struct shared_state {
- void stop()
- {
- if (!std::exchange(stopped_, true))
- {
- auto sigs = std::move(signals_);
- signals_.clear();
- for(auto& s : sigs)
- s();
- }
- }
-
- std::list> signals_;
- bool stopped_ = false;
-};
-}
-struct stop_source {
- void stop() {
- impl_->stop();
- }
- std::shared_ptr impl_;
-}
-
-struct stop_connection
-{
-};
-
-struct stop_token
-{
- stop_token(stop_source& source)
- : impl_(source.impl_) {
- }
-
- bool
- stopped() const { return !impl_ || impl_->stopped_; }
-
- stop_connection
- connect(std::function slot);
-
- std::shared_ptr impl_;
-}
-}
-```
-
-The use case would look something like this:
-
-```cpp
-
-net::awaitable
-something_with_websocket(async::stop_token token)
-{
- // passing the stop token allows the connect call to abort early
- // if the owner of the stop_source wants to end the use of the
- // websocket before it is connected
- auto ws = websocket::connect("wss://echo.websocket.org",
- websocket::connect_options { .stop_token = token });
-
- // connect a slot to the stop token which drops the connection
- auto stopconn = token.connect([&] { ws.drop(); };
-
- for(;;} {
- auto event = co_await ws.consume();
- // ...etc
- }
-
-}
-```
-
-Now, armed with both a `stop_token` and a `condition_variable`, we gain a great deal of flexibility with programs
-running on an Asio-style executor.
-
-So let's build a little chat app to talk to the echo bot.
-
-## Coding style when using Asio coroutines.
-
-I mentioned earlier that I like to decompose objects with complex lifetimes into an impl and handle. My personal
-programming style for naming the components is as follows:
-
-### The implementation
-
-This is the class that implements the complex functionality that we want. I generally give this class an `_impl` suffix
-and apply the following guidelines:
-- The impl does not control its own lifetime.
-- Calls to the impl are expected to already be executing on the correct thread or strand, and in the case of
- multi-threaded code, are expected to have already taken a lock on any mutex.
-
-This is a personal preference which I find tends to lower the complexity of the object, since the interface functions
-do not have to manage more than one concern, and deadlocks etc are not possible.
-
-### The lifetime
-
-When holding an object in shared_ptr, we get a chance to intercept the destruction of the last handle. At this point
-we do not have to destroy the implementation, but can allow it to shut down gracefully before destruction.
-In order to do this, particularly with an object that is referenced by internal coroutines, I have found that it's
-useful to separate the public lifetime of the object, and its internal lifetime, which may be longer than the public
-one.
-
-A convenient, if not especially efficient way to do this is to hold two shared_ptr's in the handle. One being a
-shared_ptr which has a custom destructor - the lifetime ptr, and one being a normal shared_ptr to the
-implementation which can be copied in order to extend its private lifetime while it shuts down.
-It is the responsibility of the custom deleter to signal the implementation that it should start shutting down.
-
-In this case, the websocket connection's public handle may look something like this:
-
-```cpp
-namespace websocket {
-
-struct connection_lifetime
-{
- connection_lifetime(std::shared_ptr&& adopted)
- : impl_(std::move(adopted))
- , lifetime_(new_lifetime(impl_))
- {
- }
-
- static std::shared_ptr
- new_lifetime(std::shared_ptr const& impl)
- {
- static int useful_address;
- auto deleter = [impl](int*) noexcept
- {
- net::co_spawn(impl->get_executor(),
- [impl]() -> net::awaitable
- {
- co_await impl->stop();
- }, net::detached);
- };
-
- return std::shared_ptr(&useful_address, deleter);
- }
-
- std::shared_ptr impl_;
- std::shared_ptr lifetime_;
-};
-
-struct connection
-{
- connection(connection_lifetime l);
-};
-}
-```
-
-The interesting part here is in the function `new_lifetime`. There are a few things going on here.
-First, we are capturing the internal lifetime of our `connection_impl` and storing it in the deleter of the lifetime
-pointer. This of course means that the private implementation will live at least as long as the public lifetime.
-Secondly, the deleter does not actually delete anything. It merely captures a copy of the impl pointer and runs a
-coroutine on the impl to completion before releasing the impl pointer. The idea is that this coroutine will not complete
-until all internal coroutines within the implementation have completed. This provides the fortunate side effect that
-operations running inside the impl do not have to capture the impl's lifetime via shared_from_this.
-It turns out that this aids composability, since subordinate coroutines within the implementation can be written as free
-functions, and ported to other implementations that may not involve a shared_ptr.
-It also means that the impl itself can be composed, since it has no restrictions on lifetime semantics.
-i.e. If I wanted to implement a JSON-RPC connection by deriving from the websocket::connection_impl, I do not have to
-be concerned about translating shared_ptrs internally in the derived class.
-
-# Once it's all put together
-
-Finally, having created all the primitives (which I really should start collating into a library), we can test our
-little websocket chat client, which becomes a very simple program:
-
-Here's main:
-```cpp
-int
-main()
-{
- net::io_context ioctx;
-
- net::co_spawn(
- ioctx.get_executor(), [] { return chat(); }, net::detached);
-
- ioctx.run();
-}
-```
-
-And here's the chat() coroutine:
-
-```cpp
-net::awaitable< void >
-chat()
-{
- // connect the websocket
- auto ws = co_await websocket::connect("wss://echo.websocket.org");
-
- // spawn the coroutine to read console input and send it to the websocket
- auto stop_children = async::stop_source();
- net::co_spawn(
- co_await net::this_coro::executor,
- [stop = async::stop_token(stop_children), ws]() mutable {
- return do_console(std::move(stop), std::move(ws));
- },
- net::detached);
-
- // read events from the websocket connection.
- for (;;)
- {
- auto event = co_await ws.consume();
- if (event.is_error())
- {
- if (event.error() == beast::websocket::error::closed)
- std::cerr << "peer closed connection: " << ws.reason()
- << std::endl;
- else
- std::cerr << "connection error: " << event.error() << std::endl;
- break;
- }
- else
- {
- std::cout << "message received: " << event.message() << std::endl;
- }
- }
-
- // at this point, the stop_source goes out of scope,
- // which will cause the console coroutine to exit.
-}
-```
-
-And finally, the do_console() coroutine. Note that I have used asio's posix interface to collect console input.
-In order to compile in a WIN32 environment, we'd need to do something different (suggestions welcome via PR!).
-
-```cpp
-net::awaitable< void >
-do_console(async::stop_token stop, websocket::connection ws)
-try
-{
- auto console = asio::posix::stream_descriptor(
- co_await net::this_coro::executor, ::dup(STDIN_FILENO));
- auto stopconn = stop.connect([&] { console.cancel(); });
-
- std::string console_chars;
- while (!stop.stopped())
- {
- auto line_len =
- co_await net::async_read_until(console,
- net::dynamic_buffer(console_chars),
- '\n',
- net::use_awaitable);
- auto line = console_chars.substr(0, line_len - 1);
- console_chars.erase(0, line_len);
- std::cout << "you typed this: " << line << std::endl;
- ws.send(line);
- }
-}
-catch(...) {
- // error handling here
-}
-```
-
-If you'd like to look into the complete code, submit a PR or offer some (probably well-deserved) criticism, you will
-find the [code repository here](https://github.com/madmongo1/blog-december-2020).
diff --git a/_posts/2021-01-01-RichardsNewYearUpdate.md b/_posts/2021-01-01-RichardsNewYearUpdate.md
deleted file mode 100644
index 556519c2c..000000000
--- a/_posts/2021-01-01-RichardsNewYearUpdate.md
+++ /dev/null
@@ -1,516 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's New Year Update - Reusable HTTP Connections
-author-id: richard
----
-
-# Reusable HTTP(S) Connections
-
-Something I am often asked by users of Boost Beast is how to code a client which effectively re-uses a pool of HTTP
-connections, in the same way a web browser does.
-
-The premise is straightforward - if our client is going to be making multiple calls to a web server (or several of them)
-then it makes sense that once a connection has been used for one request, it is returned to a connection pool so that
-a subsequent request can make use of it.
-
-It also makes sense to have a limit on the number of concurrent connections that can be open against any one host.
-Otherwise, if the client needs to make multiple requests at the same time, it will end up creating new connections in
-parallel and lose the efficiency of re-using an existing connection.
-
-From these requirements, we can start to think about a design.
-
-Firstly, we can imagine a connection cache, with connections kept in a map keyed on host + scheme + port (we can't
-re-use an HTTP port for an HTTPS request!).
-
-When a request needs a connection, it will either create a new one (connection limit per host not met) or will wait
-for an existing connection to become available (which implies a condition variable).
-
-Once a request has a connection to use, it will send the HTTP request and wait for the response.
-
-At this stage, there is a possibility that the active connection which has been allocated could have been idle since
-the last time it was used. In TCP there is no way to atomically check whether the remote host has closed the
-connection (or died). The only way to know is to actually read from the socket with a timeout. If the remote host has
-shutdown the socket, we will be notified as soon as the RST frame arrives at our host. If the remote host has stopped
-working or the network is bad, we'll be notified by the timeout.
-
-Thus if our read operation results in an error and we have inherited the connection from the cache, we ought to re-try
-by reopening the connection to the remote host and repeating the write/read operations. However, if there is an error
-reported on the subsequent attempt, then we can conclude that this is a legitimate error to be reported back to the
-caller.
-
-In simplified pseudocode, the operation might look something like this (assuming we report transport errors as
-exceptions):
-
-```cpp
-response
-read_write(connection& conn)
-{
- response resp;
-
- auto retry = false;
-
- if(conn.is_initialised())
- retry = true;
- else
- conn.connect(...);
-
- for(;;)
- {
- conn.write(request);
- auto err = conn.read(resp);
- if (err)
- {
- if(!std::exchange(retry, false))
- throw system_error(err);
- request.clear();
- conn.disconnect();
- }
- else
- break;
- }
-
- return resp;
-}
-```
-
-# Structuring the Code
-
-## General Principles
-
-In my previous [blog](https://cppalliance.org/richard/2020/12/22/RichardsDecemberUpdate.html) I mentioned my preference
-for writing the implementation of a class in such a way that it does not need to take care of its own lifetime or
-mutual exclusion. These concerns are deferred to a wrapper or handle. Methods on the handle class take care of
-marshalling the call to the correct executor (or thread) and preserving the implementation's lifetime. The
-implementation need only concern itself with the logic of handling the request.
-
-Here's an example about which I'll go into more detail later:
-```cpp
-net::awaitable< response_type >
-connection_cache::call(beast::http::verb method,
- const std::string & url,
- std::string data,
- beast::http::fields headers,
- request_options options)
-{
- // DRY - define an operation that performs the inner call.
- auto op = [&] {
- return impl_->call(method,
- url,
- std::move(data),
- std::move(headers),
- std::move(options));
- };
-
- // deduce the current executor
- auto my_executor = co_await net::this_coro::executor;
-
- // either call directly or via a spawned coroutine
- co_return impl_->get_executor() != my_executor
- ? co_await op()
- : co_await net::co_spawn(impl_->get_executor(), op, net::use_awaitable);
-}
-```
-
-## Coroutines
-
-In this implementation I will be providing a C++20 coroutine interface over Asio executors once again. I am using
-coroutines because they are easier to write when compared to Asio's composed operations, but are fundamentally the
-same thing in a prettier package.
-
-## Mutual Exclusion
-
-For mutual exclusion I will be embedding an asio strand into each active object. The advantage of doing so is that no
-thread of execution is ever blocked which means we can limit the number of threads in the program to the number of
-free CPUs giving us maximum throughput of work. In reality of course, one thread is more than enough computing power
-for almost all asynchronous programs. It's therefore better to think in terms of one _executor_ per program component,
-with the implicit guarantee that a given executor will only perform work on one thread at a time.
-Thinking this way allows us to write code in a way that is agnostic of whether the final program is compiled to be
-single or multi-threaded.
-
-## But What About Single-threaded Programs?
-
-In order that I don't need to rewrite code should I decide to make a single-threaded program multi-threaded or vice
-versa, I have a couple of little utility functions and types defined in
-[config.hpp](https://github.com/madmongo1/blog-new-year-2021/blob/master/src/config.hpp)
-
-Specifically:
-```cpp
-namespace net
-{
-using namespace asio;
-
-using io_executor = io_context::executor_type;
-
-#ifdef MULTI_THREADED
-
-using io_strand = strand< io_executor >;
-
-inline io_strand
-new_strand(io_executor const &src);
-
-inline io_strand
-new_strand(io_strand const &src);
-
-#else
-
-using io_strand = io_context::executor_type;
-
-inline io_strand
-new_strand(io_executor const &src);
-
-#endif
-
-inline io_executor
-to_io_executor(any_io_executor const &src);
-} // namespace net
-```
-
-Any object type in the program which _would require its own strand_ in a multi-threaded program will simply use the type
-`io_strand` whether the program is compiled for single-threaded operation or not. Any code that would notionally
-need to construct a new strand simply calls `new_strand(e)` where `e` is either a strand or a naked executor.
-
-Any code that needs access to the notional underlying executor would call `to_io_executor(e)`.
-
-## Determinism
-
-Since we're using asio's executors for scheduling, it means that we can use asio's timer as a deterministic, ordered
-asynchronous condition variable, which means that requests waiting on the connection pool will be offered free
-connections in the order that they were requested. This guarantee is implicit in the way that the timers' `cancel_one()`
-method is specified.
-
-As we'll see later, asio's timers also make it trivial to implement an asynchronous semaphore. In this case we use one
-to ensure that requests are handled concurrently but no more than some upper limit at any one time.
-
-## Interface
-
-I'm going to create a high-level concept called a `connection_cache`. The interface will be something like this:
-
-```cpp
-struct connection_cache
-{
- using response_ptr = std::unique_ptr< response >;
-
- net::awaitable< response_ptr >
- rest_call(
- verb method,
- std::string const& url,
- std::optional data = std::nullopt,
- headers hdrs = {},
- options opts = {});
-};
-```
-
-There are a few things to note here.
-
- - The return type of the coroutine is a unique_ptr to a response. A natural question might be whether the response
- should simply be returned by value. However, in practice I have found that there are a number of practical reasons
- why it's often better to return the response as a pointer. Firstly it allows conversion to a
- `shared_ptr` in environments where the response might be passed through a directed acyclic graph.
- Secondly, it would allow a further enhancement in that having finished with the response, the client could post it back
- to the cache, meaning that it could be cached for re-use.
- - The only two required arguments are the method and url. All others can be defaulted.
- - An optional string may be passed which contains the payload of a POST request. This is passed by value because,
- as we'll see later,the implementation will want to move this into the request object prior to initiating
- communications. I have chosen a string type for two reasons
- - text in the form of JSON or XML is the most common form of messaging in REST calls.
- - strings can contain binary data with no loss of efficiency.
- - `hdrs` is simply a list of headers which should be set in the request. Again, these are passed by value as they will
- be moved into the request object.
- - The last parameter of the call is an as-yet undefined type called `options`. This will allow us to add niceties like
- timeout arguments, a stop_token, redirect policies, a reference to a cookie store and so on.
-
-When called, `rest_call` will attempt to reuse an existing connection. If a connection is not available, it will create
-a new one if we are under the connection threshold for the deduced host and if not, it will wait until a connection is
-available.
-
-Furthermore, the number of concurrent requests will be throttled to some upper limit.
-
-Transport failures will be reported as an exception (of type `system_error`) and a successful response (even if a 400
-or 500) will be returned in the `response_ptr`. That is to say, as long as the transport layer works out, the code will
-take the non-exceptional path.
-
-## Implementation details
-
-### URL Parsing
-
-Among the things I am often asked about in the Beast slack channel and in the
-[Issue Tracker](https://github.com/boostorg/beast/issues) is why there is no URL support in Beast.
-The answer is that Beast is a reasonably low level library that concerns itself with the HTTP (and WebSocket)
-protocols, plus as much buffer and stream management as is necessary to implement HTTP over Asio.
-The concept of a URL is the subject of its own RFCs and is a higher level concern.
-The C++ Alliance is working on [Boost.URL](https://github.com/CPPAlliance/url) but it is not ready for publishing yet.
-In the meantime, I found a nifty regex on the internet that more-or-less suffices for our needs:
-
-```cpp
- std::tuple< connection_key, std::string >
- parse_url(std::string const &url)
- {
- static const auto url_regex = std::regex(
- R"regex((http|https)://([^/ :]+):?([^/ ]*)((/?[^ #?]*)\x3f?([^ #]*)#?([^ ]*)))regex",
- std::regex_constants::icase | std::regex_constants::optimize);
- auto match = std::smatch();
- if (not std::regex_match(url, match, url_regex))
- throw system_error(net::error::invalid_argument, "invalid url");
-
- auto &protocol = match[1];
- auto &host = match[2];
- auto &port_ind = match[3];
- auto &target = match[4];
- /*
- auto &path = match[5];
- auto &query = match[6];
- auto &fragment = match[7];
- */
- return std::make_tuple(
- connection_key { .hostname = host.str(),
- .port = deduce_port(protocol, port_ind),
- .scheme = deduce_scheme(protocol, port_ind) },
- target.str());
- }
-```
-### Exceptions in Asynchronous Code Considered Harmful
-
-I hate to admit this, because I am a huge fan of propagating errors as exceptions. This is because the combination of
-RAII and exception behaviour handling makes error handling very slick in C++. However, coroutines have two rather
-unpleasant limitations:
-- You can't call a coroutine in a destructor.
-- You can't call a coroutine in an exception handling block.
-
-There are workarounds. Consider:
-
-```cpp
-my_component::~my_component()
-{
- // destructor
- net::co_spawn(get_executor(),
- [impl = impl_]()->net::awaitable
- {
- co_await impl->shutdown();
- // destructor of *impl happens here
- }, net::detached);
-}
-```
-This is the destructor of a wrapper object that contains a `shared_ptr` to its implementation, `impl_`. In this case
-we can detect the destruction of `my_component` and use this to spawn a new coroutine of indeterminate lifetime that
-takes care of shutting down the actual implementation and then destroying it.
-
-This solves the problem of RAII but it mandates that we must author objects that will be used in coroutines as
-a handle-body pair.
-
-We can similarly get around the "no coroutine calls in exception handlers" limitation if we're prepared to stomach code
-like this:
-
-```cpp
-net::awaitable
-my_coro()
-{
- std::function on_error;
- try {
- co_await something();
- }
- catch(...)
- {
- // set up error_handler
- on_error = [ep = std::current_exception] {
- return handler_error_coro(ep);
- };
- }
- // perhaps handle the error here
- if (on_error)
- co_await on_error();
-}
-```
-
-I think you'll agree that this is a revolting solution to an unforgivable omission in the language. Not only is it
-untidy, confusing, difficult to teach and error-prone, it also turns exception handling into the same fiasco that is
-enforced checking of return codes.
-
-To add insult to injury, the error handling code in this function takes up 5 times as many lines as the logic!
-
-Therefore my recommendation is that in asynchronous coroutine code, it's better to avoid exceptions and have coroutines
-either return a tuple of (error_code, possible_value) or a variant containing error-or-value.
-
-For example, here is some code from the `connection_impl` in my example project:
-
-```cpp
-net::awaitable< std::tuple< error_code, response_type > >
-connection_impl::rest_call(request_class const & request,
- request_options const &options)
-{
- auto response = std::make_unique< response_class >();
-
- auto ec = stream_.is_open()
- ? co_await rest_call(request, *response, options)
- : net::error::basic_errors::not_connected;
-
- if (ec && ec != net::error::operation_aborted)
- {
- ec = co_await connect(options.stop);
- if (!ec)
- ec = co_await rest_call(request, *response, options);
- }
-
- if (ec || response->need_eof())
- stream_.close();
-
- co_return std::make_tuple(ec, std::move(response));
-}
-```
-
-### Ensuring that a Coroutine Executes on the Correct Executor
-
-In playing with asio coroutines I stumbled upon something that has become an idiom.
-
-Consider the situation where a `connection` class is implemented in terms of a handle and body. The body contains its
-own executor. In a multi-threaded build, this executor will be a `strand` while in a single threaded-build we would want
-it to be simply an `io_context::executor_type` since there will be no need for any of the thread guards implicit in a
-strand.
-
-Now consider that the implementation has a member coroutine called (say) `call`. There are two scenarios in which
-this member will be called. The first is where the caller is executing in the same executor that is associated with
-the implementation, the second is where the caller is in its own different executor.
-In the latter case, we must `post` or `spawn` the execution of the coroutine onto the implementation's executor in order
-to ensure that it runs in the correct sequence with respect to other coroutines initiated against it.
-
-The idiom that occurred to me originally was to recursively spawn a coroutine to ensure the call happened on the
-correct executor:
-
-```cpp
-net::awaitable< response_type >
-connection_cache::call(beast::http::verb method,
- const std::string & url,
- std::string data,
- beast::http::fields headers,
- request_options options)
-{
- auto my_executor = co_await net::this_coro::executor;
-
- if (impl_->get_executor() == my_executor)
- {
- co_return co_await impl_->call(method,
- url,
- std::move(data),
- std::move(headers),
- std::move(options));
- }
- else
- {
- // spawn a coroutine which recurses on the correct executor.
- // wait for this coroutine to finish
- co_return co_await net::co_spawn(
- impl_->get_executor(),
- [&]() -> net::awaitable< response_type > {
- return call(method,
- url,
- std::move(data),
- std::move(headers),
- std::move(options));
- },
- net::use_awaitable);
- }
-}
-```
-
-However, this does have the drawback that a code analyser might see the possibility of infinite recursion.
-
-After discussing this with [Chris](https://github.com/chriskohlhoff/asio/), Asio's author, a better solution was found:
-
-```cpp
-net::awaitable< response_type >
-connection_cache::call(beast::http::verb method,
- const std::string & url,
- std::string data,
- beast::http::fields headers,
- request_options options)
-{
- // DRY - define an operation that performs the inner call.
- auto op = [&] {
- return impl_->call(method,
- url,
- std::move(data),
- std::move(headers),
- std::move(options));
- };
-
- // deduce the current executor
- auto my_executor = co_await net::this_coro::executor;
-
- // either call directly or via a spawned coroutine
- co_return impl_->get_executor() != my_executor
- ? co_await op()
- : co_await net::co_spawn(impl_->get_executor(), op, net::use_awaitable);
-}
-```
-
-There are a few things to note:
-- All lifetimes are correct even though the `op` takes arguments by reference. This is because whichever path we take,
-our outer coroutine will suspend on the call to `op`.
-- Note that in the slow path, `op` is captured by value in the call to `co_spawn`. Had we written:
-`: co_await net::co_spawn(impl_->get_executor(), op(), net::use_awaitable);` then `op` would have been called _during_
- the setup of the call to `co_spawn`, which would result in `impl_->call(...)` being called on the wrong
- executor/thread.
-
-# Talk is Cheap
-
-TL;DR. Enough talk, where's the code?
-
-It's on github at [https://github.com/madmongo1/blog-new-year-2021](https://github.com/madmongo1/blog-new-year-2021).
-
-## Final Notes.
-
-Before signing off I just wanted to cover a few of the features I have completed in this example and a few that I
-have not.
-
-### What's Done
-
-- There is a limit on the number of concurrent connections to a single host. Host here is defined as a tuple of the
-transport scheme (i.e. http or https), port and hostname. This is currently defaulted to two, but would be trivial to
- change.
-
-- There is a limit on the number of concurrent requests across all hosts. This defaults to 1000. Simultaneous requests
-numbering more than this will be processed through what is an asynchronous semaphore, implemented like this:
- ```cpp
- while (request_count_ >= max_concurrent_requests_)
- {
- error_code ec;
- co_await concurrent_requests_available_.async_wait(
- net::redirect_error(net::use_awaitable, ec));
- }
-
- ++request_count_;
-
- ...
- ... request takes place here
- ...
-
- if (--request_count_ < max_concurrent_requests_)
- {
- concurrent_requests_available_.cancel_one();
- }
-
- ```
-
-### What's Not Done
-
-- HTTP 300 Redirect handling. I consider this to be a higher level concern than connection caching.
-- LRU Connection recycling. At the moment, the program will allow the number of connections to grow without limit if
- an unlimited number of different hosts are contacted. In a production system we would need to add more active state to
- each connection and have some logic to destroy old unused connections to make way for new ones.
-- The `stop_token` is not executor-aware. I have left the stop_token in for exposition but it should not be activated
- by a different executor to the one where the connection to it has been created at present. If you're interested to
- see how this will look when complete, please submit an issue against the github repo and I'll update the code and add
- a demonstration of it.
-- A Cookie Jar and HTTP session management. Again, these are higher level concerns. The next layer up would take care of
-these plus redirect handling.
-- The CMakeLists project in the repo has been tested with GCC 10.2 and Clang-11 on Fedora Linux. Microsoft developers
- may need to make the odd tweak to get things working. I'm more than happy to accept PRs.
-- Setting up of the `ssl::context` to check peer certificates. Doing this properly is complex enough to warrant another
- blog in its own right.
-
-Thanks for reading. I hope you've found this blog useful. Please by all means get in touch by:
-- raising an [issue](https://github.com/madmongo1/blog-new-year-2021/issues)
-- Contacting me in the #beast channel of [CppLang Slack](https://cppalliance.org/slack/)
-- email [hodges.r@gmail.com](mailto:hodges.r@gmail.com)
-
diff --git a/_posts/2021-01-15-DroneCI.md b/_posts/2021-01-15-DroneCI.md
deleted file mode 100644
index a7dee0b51..000000000
--- a/_posts/2021-01-15-DroneCI.md
+++ /dev/null
@@ -1,192 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: sam
-title: Drone CI
-author-id: sam
----
-# Overview
-
-A message currently appears (mid-January 2021) at the top of the travis-ci.org website.
-
-"Please be aware travis-ci.org will be shutting down in several weeks, with all accounts migrating to travis-ci.com. Please stay tuned here for more information."
-
-The transition has not been a smooth one, with long, disruptive delays occurring on existing builds, and lack of clear communication from the company. Many were unaware of the impending change. Some informative posts about the topic are [Travis CI's new pricing plan threw a wrench in my open source works](https://www.jeffgeerling.com/blog/2020/travis-cis-new-pricing-plan-threw-wrench-my-open-source-works) and [Extremely poor official communication of the .org shutdown](https://travis-ci.community/t/extremely-poor-official-communication-of-the-org-shutdown/10568).
-
-The C++ Alliance has decided to implement an in-house CI solution and make the new service available for Boost libraries also.
-
-# Selection Process
-
-The first step was choosing which software to use. There are truly a surprising number of alternatives. An extensive review was conducted, including many Continuous Integration services from [awesome-ci](https://github.com/ligurio/awesome-ci) which lists more than 50. Coincidentally, Rene Rivera had recently done an analysis as well for [ci_playground](https://github.com/bfgroup/ci_playground), and his example config files eventually became the basis for our new config files.
-
-The top choices:
-Appveyor
-Azure Pipelines
-BuildBot
-CircleCI
-CirrusCI
-Drone
-Github Actions
-Semaphore
-Shippable
-TeamCity
-
-From this list, Appveyor and Drone seemed the most promising to start with. Both allow for 100% self-hosting.
-
-## Appveyor
-
-The appeal of Appveyor is that the config files are basic yaml, and everything runs in a Docker container. It sounds perfect. However, after experimentation there were a few issues.
-1. Appveyor was originally designed for Microsoft Windows, with .NET and Powershell being key ingredients. While it can run on Linux, it's not a native Linux application. Most of the CI testing done by Boost and CPPAlliance runs on Linux.
-2. Specifically, the Docker experience on Windows is not nearly as smooth as Linux. I encountered numerous complexities when setting up Appveyor Windows Docker containers.
-3. Bugs in their app.
-
-Due to a combination of those reasons, Appveyor was not the best choice for this project.
-## Drone
-
-Within the first day or two experimenting with Drone, it became clear that this was an excellent CI framework:
-1. Simple, usable UI
-2. Easy installation
-3. Linux and Docker native
-4. Small number of processes to run
-5. Integrates with PostgreSQL, MySQL, Amazon S3
-6. Autoscale the agents
-7. Badges on github repos
-
-The main drawback with Drone is the absence of "matrix" builds. Their alternative for matrices is jsonnet or starlark, which are flexible scripting languages. I was apprehensive about this point, thinking that the end-user would prefer simple yaml files - exactly like Travis. And, in fact, that is probably the case. Basic yaml files are easier to understand. However, on balance, this was the only noticeable problem with Drone, and everything else seemed to be in order. The resulting starlark files do have a matrix-like configuration where each job can be customized.
-
-To discuss [Starlark](https://docs.bazel.build/versions/master/skylark/language.html) for a moment - it's a subset of python, and therefore easy to learn. [Why would I use Starlark instead of just Python?](https://pypi.org/project/pystarlark/) "Sandboxing. The primary reason this was written is for the "hermetic execution" feature of Starlark. Python is notoriously difficult to sandbox and there didn't appear to be any sandboxing solutions that could run within Python to run Python or Python-like code. While Starlark isn't exactly Python it is very very close to it. You can think of this as a secure way to run very simplistic Python functions. Note that this library itself doesn't really provide any security guarantees and your program may crash while using it (PRs welcome). Starlark itself is providing the security guarantees."
-
-
-### Running a drone server
-
-Create a script startdrone.sh with these contents:
-```
-#!/bin/bash
-
-docker run \
- --volume=/var/lib/drone:/data \
- --env=DRONE_GITHUB_CLIENT_ID= \
- --env=DRONE_GITHUB_CLIENT_SECRET= \
- --env=DRONE_RPC_SECRET= \
- --env=DRONE_TLS_AUTOCERT= \
- --env=DRONE_SERVER_HOST= \
- --env=DRONE_SERVER_PROTO= \
- --env=DRONE_CONVERT_PLUGIN_ENDPOINT= \
- --env=DRONE_CONVERT_PLUGIN_SECRET= \
- --env=DRONE_HTTP_SSL_REDIRECT= \
- --env=DRONE_HTTP_SSL_TEMPORARY_REDIRECT= \
- --env=DRONE_S3_BUCKET= \
- --env=DRONE_LOGS_PRETTY= \
- --env=AWS_ACCESS_KEY_ID= \
- --env=AWS_SECRET_ACCESS_KEY= \
- --env=AWS_DEFAULT_REGION= \
- --env=AWS_REGION= \
- --env=DRONE_DATABASE_DRIVER= \
- --env=DRONE_DATABASE_DATASOURCE= \
- --env=DRONE_USER_CREATE= \
- --env=DRONE_REPOSITORY_FILTER= \
- --env=DRONE_GITHUB_SCOPE= \
- --publish=80:80 \
- --publish=443:443 \
- --restart=always \
- --detach=true \
- --name=drone \
- drone/drone:1
-```
-
-Fill in the variables. (Many of those are secure keys which shouldn't be published on a public webpage.)
-Then, run the script.
-
-```
-./startdrone.sh
-```
-
-Drone is up and running.
-
-Next, the starlark plugin. Edit startstarlark.sh:
-```
-#!/bin/bash
-
-docker run -d \
- --volume=/var/lib/starlark:/data \
- --env= \
- --publish= \
- --env=DRONE_DEBUG= \
- --env=DRONE_SECRET= \
- --restart=always \
- --name=starlark drone/drone-convert-starlark
-```
-and run it:
-
-```
-./startstarlark.sh
-```
-
-Starlark is up and running. Finally, the autoscaler.
-```
-#!/bin/bash
-
-docker run -d \
- -v /var/lib/autoscaler:/data \
- -e DRONE_POOL_MIN= \
- -e DRONE_POOL_MAX= \
- -e DRONE_SERVER_PROTO= \
- -e DRONE_SERVER_HOST= \
- -e DRONE_SERVER_TOKEN= \
- -e DRONE_AGENT_TOKEN= \
- -e DRONE_AMAZON_REGION= \
- -e DRONE_AMAZON_SUBNET_ID= \
- -e DRONE_AMAZON_SECURITY_GROUP= \
- -e AWS_ACCESS_KEY_ID= \
- -e AWS_SECRET_ACCESS_KEY= \
- -e DRONE_CAPACITY_BUFFER= \
- -e DRONE_REAPER_INTERVAL= \
- -e DRONE_REAPER_ENABLED= \
- -e DRONE_ENABLE_REAPER= \
- -e DRONE_AMAZON_INSTANCE= \
- -e DRONE_AMAZON_VOLUME_TYPE= \
- -e DRONE_AMAZON_VOLUME_IOPS= \
- -p \
- --restart=always \
- --name=autoscaler \
- drone/autoscaler
-```
-Start the autoscaler.
-```
-./startautoscaler.sh
-```
-
-Windows autoscaler is still experimental. For now, both Windows and Mac servers have been installed manually and will be scaled individually. Because they are less common operating systems, with most boost builds running in Linux, the CPU load on these other machines is not as significant.
-
-# Configs
-
-The real complexities appear when composing the config files for each repository. After manually porting .travis.yml for [https://github.com/boostorg/beast]( https://github.com/boostorg/beast) and [https://github.com/boostorg/json](https://github.com/boostorg/json), the next step was creating a Python script which automates the entire process.
-
-## Drone Converter
-
-A copy of the script can be viewed at [https://github.com/CPPAlliance/droneconverter-demo](https://github.com/CPPAlliance/droneconverter-demo)
-
-The idea is to be able to go into any directory with a .travis.yml file, and migrate to Drone by executing a single command:
-
-```
-cd boostorg/accumulators
-droneconverter
-```
-
-The converter ingests a source file, parses it with PyYAML, and dumps the output in Jinja2 templates. The method to write the script was by beginning with any library, such as [boostorg/array](https://github.com/boostorg/array), and just get that one working. Then, move on to others, [boostorg/assign](https://github.com/boostorg/assign), [boostorg/bind](https://github.com/boostorg/bind), etc. Each library contains a mix of travis jobs which are both similar and different to the previously translated libraries. Thus, each new travis file presents a new puzzle to solve, but hopefully in a generalized way that will also work for all repositories.
-
-Versions of [clang](https://clang.llvm.org/) ranging from 3.3 to 11 are targeted in the tests. While later releases such as clang-6 or clang-7 usually build right away without errors, the earlier versions in the 3.x series were failing to build for a variety of reasons. First of all, those versions are not available on Ubuntu 16.04 and later, which means being stuck on Ubuntu 14.04, preventing an across-the-board upgrade to a newer Ubuntu. Then, if a standard "base" clang is simultaneously installed, such as clang-7 or 9, this seems to cause other dependent packages and libraries to be installed, which conflict with clang-3. The solution was to figure out what travis does, and copy it. Travis images have downloaded and installed clang-7 into a completely separate directory, not using the ordinary system packages. Then the extra directory /usr/local/clang-7.0.0/bin has been added to the $PATH.
-
-Some .travis.yml files have a "matrix" section. Others have "jobs". Some .travis files place all the logic in one main "install" script. Others refer to a variety of script sections including "install", "script", "before_install", "before_script", "after_success", which may or may not be shared between the 20 jobs contained in the file. Some jobs in travis were moving (with the mv command) their source directory, which is baffling and not usually permitted in Jenkins or Drone. This must be converted to a copy command instead. "travis_wait" and "travis_retry" must be deleted, they are travis-specific. Many travis variables such as TRAVIS_OS_NAME or TRAVIS_BRANCH need to be set and/or replaced in the resulting scripts. Environment variables in the travis file might be presented as a list, or a string, without quotes, or with single quotes, or double quotes, or single quotes embedded in double quotes, or double quotes embedded in single quotes, or missing entirely and included as a global env section at the top of the file. The CXX variable, which determines the compiler used by boost build, isn't always apparent and could be derived from a variety of sources, including the COMPILER variable or the list of packages. The list of these types of conversion fixes goes on and on, you can see them in the droneconverter program.
-
-Apple Macs Developer Tools depends on Xcode, with an array of possible Xcode version numbers from 6 to 12, and counting. Catalina only runs 10 and above. High Sierra will run 7-9. None of these will operate without accepting the license agreement, however the license agreement might reset if switching to a newer version of Xcode. The presence of "Command Line Tools" seems to break some builds during testing, however everything works if "Command Line Tools" is missing and the build directly accesses Xcode in the /Applications/Xcode-11.7.app/Contents/Developer directory. On the other hand, the package manager "brew" needs "Command Line Tools" (on High Sierra) or it can't install packages. A solution which appears to be sufficiently effective is to “mv CommandLineTools CommandLineTools.bck”, or the reverse, when needed.
-
-Drone will not start on a Mac during reboot unless the Drone user has a window console session running too. So, Apple is not an ideal command-line-only remote server environment.
-
-# Drone Deployments
-
-Pull requests with the new drone configs were rolled out to all those boost repositories with 100% travis build success rate and 100% drone build success rate, which accounts for about half of boost libraries. The other half of boost libraries are either missing a .travis.yml file, or they have a couple of travis/drone jobs which are failing. These should be addressed individually, even if only to post details about it in the pull requests. Ideally, attention should be focused on these failing tests, one by one, until the jobs attain a 100% build success rate and the badge displays green instead of red. The [long tail](https://en.wikipedia.org/wiki/Long_tail) distribution of edge-cases requires individual attention and rewritten tests.
-
-# Github Actions
-
-The droneconverter script is being enhanced to also generate Github Actions config files. A first draft, tested on a few boost libraries, is building Linux jobs successfully. Ongoing.
diff --git a/_posts/2021-01-31-RichardsJanuaryUpdate.md b/_posts/2021-01-31-RichardsJanuaryUpdate.md
deleted file mode 100644
index 85997b5d4..000000000
--- a/_posts/2021-01-31-RichardsJanuaryUpdate.md
+++ /dev/null
@@ -1,104 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's January Update
-author-id: richard
----
-
-# A year in the C++ Alliance
-
-January marks one year since I joined the C++ Alliance and started maintaining the Boost.Beast library.
-
-It's a pleasure to work with interesting and fun people who are passionate about writing C++, and in particular good
-C++.
-
-During the past year I have spent some time attending ISO WG21 meetings online as an alternate representative of the
-C++ Alliance. Prior to joining the organisation, during my life as a developer I always felt that the standards
-committee did the developer community a disservice. Without knowing much about the inner workings, it seemed to me that
-the committee lived in an Ivory Tower. So my intention was to see if there was a way to bring something useful to the table
-as a keen and prolific writer of C++ in the financial sector. In particular I had a personal interest in the
-standardisation of asynchronous networking, forked from the wonderful Asio library.
-
-I ended the year feeling no less jaded with the entire standards process, concluding that there is not much I can do to
-help.
-
-I feel it's important to say that the committee is attended by very bright, passionate people who clearly enjoy the C++
-language as much as I do.
-
-What I think does not work, at least from the point of view of delivering useful progress, is the process of committee
-itself. During my commercial life there has been one fundamental truth, which is that things go well when there is focus
-of attention and the taking of personal responsibility. It seems to me that committees in general undermine these
-important fundamentals. The upshot of this is that in my mind, C++ developers are not going to get the tools they need,
-in the timescales they need, if they wait for the slow grind of WG21's wheels of stone.
-
-It is to me noteworthy that many of the libraries I actually use (fmt, spdlog, boost, jwt, openssl, and so on) have been
-in some way standardised or are in the process of being standardised, but always in a lesser form than the original,
-created by a small, passionate team of individuals who enjoyed autonomy and freedom of design.
-
-Even now, if a feature is available in the standard and as a 3rd party library, I will almost always choose the third
-party version. It will generally have more features and a design undamaged by a process that externalises costs.
-
-Which brings me back to my old C++ mantra, proven true for me over the past 15 years or so, *Boost is the
-Standard*. Having said this, I must in fairness mention the wonderful fmtlib and spdlog libraries, and the gargantuan
-Qt, without which a back-end developer like me would never be able to get anything to display on a screen in a cross-
-platform manner.
-
-In the end I find myself in the same place I was a year ago: My view is that the only thing C++ needs is a credible
-dependency management tool. Given that, the developer community will produce and distribute all needed libraries, and
-the most popular will naturally become the standard ones.
-
-# The Year Ahead
-
-Therefore, it is my intention this year to do what I can to bring more utility to the Boost ecosystem, where one
-person can make a useful impact on the lives of developers, and taking personal responsibility for libraries is the
-norm.
-
-## The Big Three
-
-It is my view that there are a number of areas where common use cases have not been well served in Boost. These are of
-course JSON, Databases and HTTP clients.
-
-### JSON
-
-At the end of 2020, Vinnie and the team finally brought a very credible JSON library to Boost, which I have used to
-write some cryptocurrency exchange connectors. On the whole, it's proven to be a very pleasant and
-intuitive API. In particular the methods to simultaneously query the presence of a value and return a pointer on success
-with `if_contains`, `if_string` etc. have seen a lot of use and result in code that is readable and neat.
-
-Currently, Boost.JSON favours performance over accuracy with respect to parsing floating point numbers (sometimes a
-number is 1 ULP different to that which you'd expect from `std::strtod`). I had a little look to see if there was
-a way to address this easily. It transpired not. Parsing decimal representations of floating point numbers into a binary
-representation that will round-trip correctly is a hard problem, and beyond my level of mathematical skill.
-There is currently work underway to address this in the JSON repo.
-
-There is also work in progress to provide JSON-pointer lookups. This will be a welcome addition as it will mean I can
-throw away my jury-rigged "path" code and use something robust.
-
-### MySQL
-
-A very common database in use in the web world is of course MySQL. I have always found the official connectors somewhat
-uninteresting, particularly as there is no asynchronous connector compatible with Asio.
-
-That was until a certain Rubén Pérez decided to write an asio-compatible mysql connector from scratch, using
-none of the code in the Oracle connector. Rubén has started the arduous journey of getting his
-[library](https://anarthal.github.io/boost-mysql/index.html) ready for Boost review.
-
-I have been asked to be the review manager for this work, something I am happy to do as, whether or not the admission is
-ultimately accepted, I think the general principle of producing simple, self-contained libraries that meet a
-common requirement is to be encouraged.
-
-If this library is successful, I would hope that others will rise to the challenge and provide native connectors for
-other common database systems.
-
-### HTTP Client
-
-I mentioned in an earlier blog that an HTTP Client with a similar interface to Python's Requests library would be worked
-on. As it happens, other priorities took over last year. This year I will be focussing efforts on getting this library
-in shape for a proof of concept.
-
-### I mean Big Four
-
-Redis is ubiquitous in server environments. A quick search for Redis C++ clients turned up this
-[little gem](https://github.com/basiliscos/cpp-bredis). I'd like to find time to give this a try at some point.
-
diff --git a/_posts/2021-02-15-dmitrys-january-update.md b/_posts/2021-02-15-dmitrys-january-update.md
deleted file mode 100644
index d5057dac0..000000000
--- a/_posts/2021-02-15-dmitrys-january-update.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: dmitry
-title: Dmitry's January Update
-author-id: dmitry
----
-
-# Dmitry's January Update
-
-In January 2021 I started working on improving Boost.JSON for The C++ Alliance.
-The time during this first month was mostly spent on getting familiar with the
-project, particularly with its list of issues on GitHub.
-
-It turns out, half of the open issues are related to documentation. For
-example, the section about conversions might need a rewrite, and related
-entries in the reference need to provide more information. There should be more
-examples for parsing customizations. There also needs to be a separate section
-dedicated to the library's named requirements. There was also a bug in conversion
-customization logic that was fixed by me this month.
-
-The next two large blocks are issues related to optimization opportunities and
-dealing with floating point numbers (some issues are present in both groups).
-The next group is issues related to build and continuous integration. A couple
-of build bugs were fixed in January.
-
-The final group consists of feature requests, mostly for convenient ways to
-access items inside `json::value`. And this month I started implementing one
-such feature -- Json.Pointer support. The work is still in the early stages,
-though.
diff --git a/_posts/2021-02-20-EmilsJanuaryUpdate.md b/_posts/2021-02-20-EmilsJanuaryUpdate.md
deleted file mode 100644
index 91a37b9db..000000000
--- a/_posts/2021-02-20-EmilsJanuaryUpdate.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: emil
-title: Emil's January Update
-author-id: emil
----
-
-# History
-
-This is my first entry on the C++ Alliance web site. I am happy to have been invited to join the organization.
-
-I first talked to Vinnie on Slack, and was surprised to find that he is practically a neighbor. So we got in touch, had breakfast together, talked about C++ and life, the universe and everything. We became friends, he took care of our cats when we flew to Europe (pre-Covid), and I brought back some [Rakia](https://en.wikipedia.org/wiki/Rakia#Bulgaria) (next time I'll bring him some of my father's home-made stuff!)
-
-# (Boost) URL
-
-My immediate focus after joining the C++ Alliance is to work on what will hopefully become Boost.URL. C++ has long been missing a solid standard-conforming library for working with URLs, we often get contacted by impatient users asking about the state of things.
-
-Building on the foundation already in place and requiring only C++11, the library is being developed to the highest standards of portability, safety and efficiency. We're also careful to keep the design simple, with a lot of attention paid to minimizing physical coupling so that compilation is as fast as possible.
-
-# About
-
-I started coding in middle school on the Bulgarian-made Apple ][ clone [Правец-82](https://www.zdnet.com/article/how-these-communist-era-apple-ii-clones-helped-shape-central-europes-it-sector/) (named after -- you guessed it -- the birth place of the leader of the Bulgarian Communist Party at the time). This has resulted in a lot of useless information being engraved on my brain, I still remember some of the opcodes of the 6502 CPU. :)
-
-After graduating the Sofia University I moved to L.A. to work in the video game industry where I've spent most of my professional career, starting with the original Playstation all the way to modern consoles and handhelds. For the last few years I've shifted to projects that utilize my background in realtime graphics engines in areas that are not directly connected to gaming.
-
-Pretty much all of my projects have been written in C++. This includes several Boost libraries, the latest one being LEAF, a Lightweight Error Augmentation Framework for C++11. I'll probably post about error handling at some later time, as this is one of my key areas of interest.
-
-Thanks for reading.
-
-Emil Dotchevski
diff --git a/_posts/2021-03-30-RichardsMarchUpdate.md b/_posts/2021-03-30-RichardsMarchUpdate.md
deleted file mode 100644
index 848a2c12e..000000000
--- a/_posts/2021-03-30-RichardsMarchUpdate.md
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's February/March Update
-author-id: richard
----
-
-# Boost.PropertyTree is back in the saddle!
-
-Last month I took over the maintenance of the Boost Property Tree library.
-
-This library is in ubiquitous use but has been without a full time maintainer for some time.
-There was a backlog of commits on develop that had not made it to master for the boost releases.
-This is changing in upcoming 1.76 release. Property Tree PRs and issues will get more attention going forward.
-In addition, Property Tree has gained CI courtesy of GitHub Actions. The test status can be seen
-[here](https://github.com/boostorg/property_tree)
-
-# Beast CI
-
-Beast has acquired two new continuous integration pipelines. Drone and Github Actions.
-Drone is the canonical source of truth for the Linux/Windows badge on the
-[readme page](https://github.com/boostorg/beast) while GitHub Actions provides a more comprehensive matrix of
-targets, C++ standards and compiler versions which I use during the development cycle.
-
-# Boost Version 1.76
-
-The 1.76 release of boost is imminent. As far as Beast and Property Tree is concerned, this is a maintenance release.
-
-# Other Activities
-
-Much of the past two months has been devoted to resolving user queries, and examining suggestions for changes to Beast
-and Property Tree.
-
-Changes to Property tree are not taken lightly. It is used in a great deal of legacy code in the wild and there are few
-tests.
-It would be a shame to break existing code noisily, or worse, silently.
-For better or worse, the behaviour is probably not going to change very much going forward until there are more tests in
-place.
-
-Accepting changes to beast's behaviour or interface is something we consider very carefully for a different reason.
-Beast is already a complex library. Like Asio upon which it is built, it is already extremely configurable, with a
-myriad (actually potentially an unbounded set) of completion tokens, completion handlers and buffer types.
-
-Something I am often asked is whether we would consider a "multi-send" interface. For example:
-
-```cpp
-std::vector messages;
-ws.write(buffer, messages);
-```
-
-Arguably there are some efficiencies available here, since we could potentially build all the resulting websocket frames
-in one pass, and send as one IO operation.
-
-There are some issues however. One is buffer space. What do we do if the buffer runs out of space while we are half way
-through building the frames? Fail the operation, return a partial failure? Block?
-Also, what do we do if the ensuing IO operation(s) partially fail(s)? Asio currently has no protocol to report a partial
-failure. We would have to create such a protocol, test it and then teach it.
-
-In reality people's requirements are often different. Some people may require confirmation of each frame within a time
-period, some will want all-or-nothing delivery and some are happy with partial delivery.
-
-One approach is to start providing options for such things as timeouts, partial completion, etc. But the feeling here is
-that this starts to dictate to users of the library how they write code, which we feel is outside the remit of Beast.
-
-Perhaps there is a case for a function that separates out the building of a websocket frame from the sending of it.
-I'll give it some thought.
diff --git a/_posts/2021-04-30-RichardsAprilUpdate.md b/_posts/2021-04-30-RichardsAprilUpdate.md
deleted file mode 100644
index 149678088..000000000
--- a/_posts/2021-04-30-RichardsAprilUpdate.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's April Update
-author-id: richard
----
-
-# A new responsibility
-
-Last month I took over the maintenance of the Boost Property Tree library.
-
-This library is in ubiquitous use but has been without a full time maintainer for some time.
-There was a backlog of commits on develop that had not made it to master for the boost releases.
-This is changing in upcoming 1.76 release. Property Tree PRs and issues will get more attention going forward.
-In addition, Property Tree has gained CI courtesy of GitHub Actions. The test status can be seen
-[here](https://github.com/boostorg/property_tree)
-
-# Beast CI
-
-Beast has acquired two new continuous integration pipelines. Drone and Github Actions.
-Drone is the canonical source of truth for the Linux/Windows badge on the
-[readme page](https://github.com/boostorg/beast) while GitHub Actions provides a more comprehensive matrix of
-targets, C++ standards and compiler versions which I use during the development cycle.
-
-# Boost Version 1.76
-
-The 1.76 release of boost is imminent. As far as Beast and Property Tree is concerned, this is a maintenance release.
-
diff --git a/_posts/2021-05-30-RichardsMayUpdate.md b/_posts/2021-05-30-RichardsMayUpdate.md
deleted file mode 100644
index a67da3004..000000000
--- a/_posts/2021-05-30-RichardsMayUpdate.md
+++ /dev/null
@@ -1,468 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's May 2021 Update
-author-id: richard
----
-
-# The Month in Review
-
-It's been a month of minor maintenance fixes, and a fair amount of support requests via
-the [C++ Alliance Slack workspace](https://cppalliance.org/slack/).
-
-## Property_tree
-
-On the maintenance front, there are a number of historic pull requests in the property_tree repo which need working
-through. Some of these take some unravelling and a degree of care, as I am still new to this venerable library and I
-have the impression that it is used fairly ubiquitously in many code bases of varying pedigree.
-
-Currently I have no way of reaching out to users (not knowing exactly who they are) so the only way to know whether a
-change is going to break someone's build is to release it, by which time it is too late.
-
-I think the answer here is to start building out more test cases. Property_tree only recently gained CI, and so far I
-have not gotten around to adding test coverage. No doubt I'll get to this in due course.
-
-## Beast
-
-There are a lot of eager developers out there keen to use Beast and Asio, which is encouraging. The less encouraging
-thing is the amount of time I find myself spending giving ad-hoc support to people who have hit the Asio mental brick
-wall (which I remember when learning this fantastic library all too well).
-
-I have written blogs in this series before covering some of the topics I think are important and developers often
-misunderstand, but there is more to do.
-
-With this in mind, an idea has been germinating over the past few months, which finally started to develop into a new
-library this month. I'll come back to this later.
-
-## Asio
-
-A few months ago I attended a WG21 meeting where a formal means of providing cancellation to asynchronous operations was
-proposed. A few people at that meeting, myself included, were concerned that the proposal in its current form would
-constrain the development style of asynchronous programs, making the fundamental objects a little more complex than they
-often need to be.
-
-I have recognised that Asio needs a formal task cancellation mechanism for some time, this being the basis of the async
-cancellation_token mentioned in a previous blog.
-
-I have been able to get some of Chris Kohlhoff's valuable time to discuss this to see whether there is a way to get
-effortless cancellation into Asio without impacting performance or compiled size when cancellation is not required.
-
-Chris, as he is wont to do, made the rather brilliant connection that in Asio, a 1-shot cancellation token can be
-associated with each asynchronous completion handler, with the default token type being a zero-cost abstraction of a
-null cancellation token - i.e. one that will never invoke the stop callback.
-
-The general idea being that if you want an operation to be cancellable, you would invoke it like this:
-
-```cpp
-// This is the signal object that you would use to
-// cancel any operation that depends on one of its slots
-asio::cancellation_signal sig;
-
-// some IO Object
-timer t(ioc, chrono::seconds(5));
-
-// perform an asynchronous operation bound to the cancellation signal
-t.async_wait(
- bind_cancellation_slot(sig.slot(),
- [](system::error_code ec)
- {
- // if the signal is invoked, the timer's asynchronous operation will notice
- // and the operation will complete with ec equal to asio::errors::operation_aborted
- }));
-
-// signal the cancellation
-sig.emit();
-```
-
-The interesting thing about this is that the cancellation slot is associated with the asynchronous operation's handler.
-This is not only useful for a library-provided asynchronous operation such as a timer wait. Because of the existence of
-a function called `get_associated_cancellation_slot(handler)`, the current slot is available in any context where user
-code has access to the current asynchronous completion handler.
-
-One such place is in a user-defined composed operation, and therefore by extension, a c++ coroutine running in the
-context of an Asio executor.
-
-This now becomes possible:
-
-```cpp
-
-asio::awaitable
-my_coro(some_async_op& op)
-{
- // The cancellation state allowed us to detect whether cancellation has been requested
- // It also allows us to register our own cancellation slot
- auto cs = asio::this_coro::cancellation_state;
-
- // Create a new slot from the cancellation state and register a callback which will
- // invoke our own custom cancel signal on the some_async_op
- // note: A
- auto slot = cs.slot();
- slot.emplace([&]{ op.cancel(); });
-
- // continue to wait on the some_async_op
- co_await op.wait_to_finish();
-}
-```
-
-This coroutine could be invoked in a couple of ways:
-
-```cpp
-
-// In this case the cancellation state is a no-op cancellation.
-// the code at note A above will do nothing. This coroutine is not cancellable.
-co_await asio::co_spawn(exec,
- my_coro(op),
- asio::use_awaitable);
-
-// In this case, the coroutine has become cancellable because the code at note A will actually
-// create a functioning slot and register the lambda.
-// The coroutine is cancellable through the cancellation signal sig.
-asio::cancellation_signal sig;
-co_await asio::co_spawn(exec,
- my_coro(op),
- asio::bind_cancellation_signal(
- asio::use_awaitable, sig));
-```
-
-This code is experimental at the moment, but is available
-[on the generic-associators branch of Boost.Asio](https://github.com/boostorg/asio/tree/generic-associators).
-
-# The Project du Jour
-
-Coming back to the "Asio is hard at the beginning" meme, I was speaking to my son recently. He works with a number of
-languages, including Python, Go and C++.
-
-During a conversation about these he mentioned that Go was a very uninspiring language (to him) but it was very easy to
-get fairly complex asynchronous programs functioning reliably in a short amount of time.
-
-I asked him what the single most effective feature of the language was, to which he replied, "channels".
-
-For anyone who does not already know, a golang channel is simply a multi-producer, multi-consumer ring buffer with an
-asynchronous interface.
-
-It has the following behaviour:
-
-- Producer coroutines will suspend when providing values to the channel if the ring buffer is full and there is no
- consumer pending a consume operation.
-- Consumer coroutines will suspend when consuming if the ring buffer is empty and there is no pending producer operation
- in progress.
-- The ring buffer capacity is specified upon construction, and may be zero. Producers and consumers of a zero-sized
- channel will only make progress if there is a corresponding pair of producer and consumer pending at the same time. In
- this way, the channel also acts as a coroutine synchronisation primitive.
-- Finally, the channel may be closed. A closed channel will allow a consumer to consume remaining values in the ring
- buffer, but it will not allow a producer to provide more values, whether into the ring buffer or directly to a pending
- consume operation. A consume operation against an empty, closed channel will yield a default-constructed object plus a
- boolean false indicating that there are no more values to consume.
-
-There are some other nice features in Go, such as the select keyword which interacts with channels in a pleasing way, but
-for now I'll focus on how we might implement the channel in asynchronous C++.
-
-The rationale here being:
-
-- Channels make writing complex asynchronous interactions simple.
-- Making simple things simple is the mantra to which I subscribe.
-- Perhaps C++ enthusiasts would benefit from an implementation of channels.
-- Given the flexibility of C++, we might be able to do a better job than Go, at least in terms of giving the programmer
- some choice over implementation tradeoffs.
-- Maybe a little library offering this functionality in a simple, reusable way would be a useful addition to Boost.
-
-I put some feelers out in the CppLang slack. So far the response to the idea has been only positive. So I decided to
-make a start.
-
-TLDR - you can monitor how far I am getting by looking at
-the [Github repository](https://github.com/madmongo1/boost_channels).
-
-## Approach
-
-I wanted the channels library to be built on top of Asio. The reason for this is that I happen to think that the Asio
-executor model is very elegant, and allows the programmer to transpose the same fundamental idea onto a number of
-different concurrency strategies. For example, thread pools, IO loop, threads and futures, and so on.
-
-Asio's completion tokens allow the adaptation of asynchronous initiating functions to any or all of these strategies and
-I wanted to make sure that the library will provide this functionality.
-
-Furthermore, asynchronous programs become complex quickly. Asio is a natural fit for IO, but does not provide the
-primitives that programmers often find they need to create rich programs.
-
-It is my hope that this channels library provides people with a useful tool to make high performance, highly concurrent
-programs easier to write in C++.
-
-## Design Decisions
-
-I have elected to write the library in two sections. The first will contain the basic objects to handle the concurrent
-communication and asynchronous completions. These objects will not be thread-safe, just like any other object in Asio.
-
-The second will be a thread-safe interface written in terms of the first. The truth is that Asio objects do not need to
-be thread-safe if programmers use the correct discipline vis-a-vis strands and ensuring that work is dispatched to the
-correct strand. Another truth is that many programmers just want things to be easy. So why not provide an easy-mode
-interface too?
-
-## Comparison
-
-OK, so let's take a simple Go program and see how we could express that in terms of Asio and C++ coroutines. Now I'm no
-expert, so I'm sure there are many ways to improve this program. It's about the third Go program I've ever written.
-Please by all means let me know.
-
-```go
-package main
-
-import (
- "fmt"
- "sync"
-)
-
-func produce(wg *sync.WaitGroup, c chan<- string) {
- defer wg.Done()
- c <- "The"
- c <- "cat"
- c <- "sat"
- c <- "on"
- c <- "the"
- c <- "mat"
- close(c)
-}
-
-func consume(wg *sync.WaitGroup, name string, c <-chan string) {
- defer wg.Done()
- for {
- s, more := <-c
- if more {
- fmt.Println(name, ":", s)
- } else {
- fmt.Println(name, ": Channel closed", name)
- break
- }
- }
-}
-
-// Main function
-func main() {
- var wg sync.WaitGroup
- wg.Add(4)
- c := make(chan string)
- go consume(&wg, "a", c)
- go consume(&wg, "b", c)
- go consume(&wg, "c", c)
- go produce(&wg, c)
- wg.Wait()
-}
-```
-
-And this is how I would envision it would look in the first cut of the C++ version:
-
-```cpp
-auto
-produce(channels::channel< std::string > &c)
- -> asio::awaitable< void >
-{
- constexpr auto wait = asio::use_awaitable;
- co_await c.async_send("The", wait);
- co_await c.async_send("cat", wait);
- co_await c.async_send("sat", wait);
- co_await c.async_send("on", wait);
- co_await c.async_send("the", wait);
- co_await c.async_send("mat", wait);
- c.close();
-}
-
-auto
-consume(std::string_view name, channels::channel< std::string > &c)
- -> asio::awaitable< void >
-{
- auto ec = channels::error_code();
- auto tok = asio::redirect_error(asio::use_awaitable, ec);
- for (;;)
- {
- auto s = co_await c.async_consume(tok);
- if (ec)
- {
- std::cout << name << " : " << ec.message() << "\n";
- break;
- }
- else
- std::cout << name << " : " << s << "\n";
- }
-}
-
-int
-main()
-{
- auto ioc = asio::io_context();
- auto c = channels::channel< std::string >(ioc.get_executor());
-
- asio::co_spawn(ioc, consume("a", c), asio::detached);
- asio::co_spawn(ioc, consume("b", c), asio::detached);
- asio::co_spawn(ioc, consume("c", c), asio::detached);
- asio::co_spawn(ioc, produce(c), asio::detached);
-
- ioc.run();
-}
-```
-
-One example of the output of the Go program (the order is actually nondeterministic) is:
-
-```text
-a : The
-a : cat
-b : sat
-b : mat
-b : Channel closed b
-a : on
-a : Channel closed a
-c : the
-c : Channel closed c
-```
-
-while the output of the C++ program is a more deterministic:
-
-```text
-a : The
-b : cat
-c : sat
-a : on
-b : the
-c : mat
-a : Channel is closed
-b : Channel is closed
-c : Channel is closed
-```
-
-I'm not an expert in Go by any means but I imagine the nondeterminism in the Go program is in part due to the fact that
-the goroutine implementation is allowed to take shortcuts to consume data synchronously if it's available. The Asio
-model requires that each completion handler is invoked as-if by a call to `post(handler)`. In this program, these posts
-are being made to a single-threaded io_context and so are being executed sequentially, preserving the order of
-invocation during execution.
-
-If this program were multi-threaded, it might be a different story. But this will have to wait until the basic
-single-threaded implementation is complete.
-
-## Implementation Details
-
-The implementation of the channel is actually fairly straightforward. The asynchronous initiation interfaces are
-standard asio, e.g.:
-
-```cpp
-template < class ValueType, class Executor >
-template < BOOST_ASIO_COMPLETION_TOKEN_FOR(void(error_code)) SendHandler >
-BOOST_ASIO_INITFN_RESULT_TYPE(SendHandler, void(error_code))
-channel< ValueType, Executor >::async_send(value_type value,
- SendHandler &&token)
-{
- if (!impl_) [[unlikely]]
- BOOST_THROW_EXCEPTION(std::logic_error("channel is null"));
-
- return asio::async_initiate< SendHandler, void(error_code) >(
- [value = std::move(value), this](auto &&handler) {
- auto send_op = detail::create_channel_send_op(
- std::move(value),
- this->impl_->get_executor(),
- std::forward< decltype(handler) >(handler));
- impl_->notify_send(send_op);
- },
- token);
-}
-```
-
-The macros are supplied by Asio and simply ensure that the most up-to-date compiler facilities are used to ensure that
-the completion token/handler has the correct signature. `BOOST_ASIO_INITFN_RESULT_TYPE` deduces the return type of the
-selected specialisation of `async_initiate`. It is what ensures that `async_send` returns an awaitable when the
-completion token is of type `asio::use_awaitable`, or a `std::future` if we were to pass in `asio::use_future`.
-
-The actual work of the send is performed in the implementation class:
-
-```cpp
- void
- notify_send(detail::channel_send_op_concept< ValueType > *send_op)
- {
- // behaviour of send depends on the state of the implementation.
- // There are two states, running and closed. We will be in the closed
- // state if someone has called `close` on the channel.
- // Note that even if the channel is closed, consumers may still consume
- // values stored in the circular buffer. However, new values may not
- // be send into the channel.
- switch (state_)
- {
- case state_running:
- [[likely]] if (consumers_.empty())
- {
- // In the case that there is no consumer already waiting,
- // then behaviour depends on whether there is space in the
- // circular buffer. If so, we store the value in the send_op
- // there and allow the send_op to complete.
- // Otherwise, we store the send_op in the queue of pending
- // send operations for later processing when there is space in
- // the circular buffer or a pending consume is available.
- if (free())
- push(send_op->consume());
- else
- senders_.push(send_op);
- }
- else
- {
- // A consumer is waiting, so we can unblock the consumer
- // by passing it the value in the send_op, causing both
- // send and consume to complete.
- auto my_receiver = std::move(consumers_.front());
- consumers_.pop();
- my_receiver->notify_value(send_op->consume());
- }
- break;
- case state_closed:
- // If the channel is closed, then all send operations result in
- // an error
- [[unlikely]] send_op->notify_error(errors::channel_closed);
- break;
- }
- }
-```
-
-An interesting feature of the send operation class is that when it is instructed to complete, it must:
-
-- Move the value out of itself,
-- Move the completion handler out of itself,
-- Destroy itself, returning memory back to the allocator.
-- Post the completion handler to the correct executor.
-- Return the value.
-
-The order is important. Later on we will be adding Asio allocator awareness. In order to maximise efficiency, Asio
-asynchronous operations must free their memory back to the allocator before completing. This is so that during the
-execution of the completion handler, the same memory that was just freed into asio's special purpose allocators will be
-allocated and used to compose the next completion handler. This memory will be at the head of the allocator's list of
-free blocks (and therefore found first) and it will be in cached memory, having just been touched.
-
-```cpp
-template < class ValueType, class Executor, class Handler >
-auto
-basic_channel_send_op< ValueType, Executor, Handler >::consume() -> ValueType
-{
- // move the result value to the local scope
- auto result = std::move(this->value_);
-
- // move the handler to local scope and transform it to be associated with
- // the correct executor.
- auto handler = ::boost::asio::bind_executor(
- std::move(exec_),
- [handler = std::move(handler_)]() mutable { handler(error_code()); });
-
- // then destroy this object (equivalent to delete this)
- destroy();
-
- // post the modified handler to its associated executor
- asio::post(std::move(handler));
-
- // return the value from the local scope to the caller (but note that NRVO
- // will guarantee that there is not actually a second move)
- return result;
-}
-```
-
-That's all for now. I'll add extra blog entries as and when I make any significant progress to the library.
-
-In the meantime, I'm always happy to receive queries by email or as issues in the github repo.
-
-Thanks for reading.
-
-Richard Hodges
-for C++ Alliance
-[hodges.r@gmail.com](mailto:hodges.r@gmail.com)
diff --git a/_posts/2021-10-10-RichardsOctoberUpdate.md b/_posts/2021-10-10-RichardsOctoberUpdate.md
deleted file mode 100644
index 058dcd633..000000000
--- a/_posts/2021-10-10-RichardsOctoberUpdate.md
+++ /dev/null
@@ -1,455 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's October Update
-author-id: richard
----
-
-# Aims and Objectives
-
-This blog is presented in two sections.
-
-The first is a general discussion about completion tokens.
-
-The second is a practical demonstration of a production-grade completion token which adds significant utility to any
-asynchronous operation that supports the new cancellation feature that arrived in Asio 1.19 (Boost 1.77).
-
-This blog ships with an accompanying github repository in case you want to play with examples.
-The repository is [here](https://github.com/madmongo1/blog-2021-10).
-
-# Asio and the Power of Completion Tokens
-
-Asio (available [standalone](https://think-async.com/Asio/) and [in Boost](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio.html))
-defines a pattern for writing asynchronous operations. There have been a few examples in my blogs of custom composed
-operations written in terms of several asynchronous operations made available by the library.
-
-Another often overlooked feature of Asio is the ability to define a customisation point which defines the "behaviour
-during initiation and the result type" of the asynchronous initiating function.
-
-But what does that mean?
-
-Well, consider the following code:
-
-```c++
-/* void */ asio::async_read(sock, buffer, "\r\n", [](error_code ec, std::size_t n) { /* handler */ });
-```
-
-This is a verbatim (you could say old-style) asynchronous initiating function which will read from the socket into the
-buffer until either:
-- The buffer is full, or
-- the sequence `\r\n` is found in the input stream, or
-- There is some other IO error.
-
-Whichever happens, the lambda is called in the context of the _associated executor_ of the socket.
-
-(Let's call this "_the operation_")
-
-The operation is started immediately and the lambda will be invoked at some point in the future once the operation is
-complete. The initiating function returns `void`.
-
-Now consider:
-
-```c++
-auto n = co_await asio::async_read(sock, "\r\n", asio::use_awaitable);
-```
-
-This code is using the same _initiating function_ to initiate the same _asynchronous operation_. However, this
-time instead of providing a _Completion Handler_ we have provided a _Completion Token_.
-
-The only difference in the two invocations is the presence of the token. The actual asynchronous operation is the same
-in both cases.
-
-However, now invocation of _the operation_ has been modified such that:
-- The initiating function returns an `asio::awaitable` which can be `co_await`ed.
-- The initiating function has been transformed into a C++20 coroutine.
-- The operation will not commence until the returned awaitable has been `co_await`ed.
-
-We can say that the completion token has implemented a customisation point at both the initiation step and the
-completion step.
-
-(For great notes on completion step I would recommend reading one of the [many excellent papers](https://isocpp.org/files/papers/P2444R0.pdf),
-[follow-ups](https://isocpp.org/files/papers/P2469R0.pdf) or
-[videos](https://www.youtube.com/watch?v=icgnqFM-aY4&t=1129s)), published by Chris Kohlhoff - the author of Asio.
-
-Here is another interesting example:
-
-```c++
-using asio::experimental::deferred;
-using asio::use_awaitable;
-
-auto my_op = asio::async_read(sock, "\r\n", deferred);
-...
-auto n = co_await my_op(use_awaitable);
-```
-
-In this case, the `async_read` initiating function has been invoked with the `deferred` _completion token_. This token
-has two side effects:
-- The _asynchronous operation_ is not actually initiated, and
-- It changes the return type to be an invocable object which when called will behave as if you called the initiating function.
-
-The returned invocable object is a unary function object whose argument is a _completion token_, which means that the
-operation can be further customised at the point of use. You can think of it as an asynchronous packaged task awaiting
-one last customisation before use.
-
-Note that as long as the packaged asynchronous operation is started with reference arguments or lightweight copyable
-arguments, it can be re-used and copied. All arguments of Asio and Beast initiating functions
-conform to this principle. The original design decision of passing buffers and return values by reference to
-asynchronous operations was to ensure that when operations are composed, they do not allocate memory - the caller can
-specify the memory management strategy. It so happens that this design decision, taken something like 16 years ago,
-has enabled efficient composition of completion tokens.
-
-Finally, on the subject of `deferred`, deferring a deferred initiating function yields the same deferred initiating
-function. I guess one way to think about completion tokens is that they are transforms or higher order functions
-for manipulating the initiation and result types of asynchronous operations.
-
-example:
-
-```c++
-asio::awaitable
-reader(asio::ip::tcp::socket sock)
-{
- using asio::experimental::deferred;
- using asio::use_awaitable;
- using asio::experimental::as_tuple;
-
- // An easy but not efficient read buffer
- std::string buf;
-
- // created the deferred operation object
- auto deferred_read = async_read_until(
- sock,
- asio::dynamic_buffer(buf),
- "\n",
- deferred);
-
- // deferring a deferred operation is a no-op
- auto deferred_read2 = deferred_read(deferred);
-
- // tokens are objects which can be composed and stored for later
- // The as_tuple token causes the result type to be reported as a
- // tuple where the first element is the error type. This prevents
- // the coroutine from throwing an exception.
- const auto my_token = as_tuple(use_awaitable);
-
- bool selector = false;
- for(;;)
- {
- // use each deferred operation alternately
- auto [ec, n] = co_await [&] {
- selector = !selector;
- if (!selector)
- return deferred_read(my_token);
- else
- return deferred_read2(my_token);
- }();
- if (ec)
- {
- std::cout << "reader finished: " << ec.message() << "\n";
- break;
- }
- auto view = std::string_view(buf.data(), n - 1);
- std::cout << "reader: " << view << "\n";
- buf.erase(0, n);
- }
-}
-```
-
-A table of completion tokens packaged with Asio is presented here:
-
-|token|Initiation Policy|Completion Behaviour/Result Type|Notes|
-|-----|-----------------|--------------------------------|-----|
-| [detached](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/detached.html) | Initiate immediately | Ignore all errors and results | When used with `co_spawn`, causes the spawned asynchronous chain of coroutines to have behaviour analogous to a detached thread. |
-| [experimental::deferred](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/experimental__deferred.html) | Do not initiate | Return a function object which when invoked with a completion token, behaves as if the original initiating function was called with that same token | Analogous to an asynchronous packaged task. |
-| [use_future](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/use_future.html) | Initiate immediately | Return a std::future which will yield the completion arguments | |
-| [use_awaitable](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/use_awaitable.html) | Initiate when awaited | Return an awaitable object which yields the completion arguments when `co_await`ed| |
-| [yield_context](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/yield_context.html) | Initiate immediately | Yield the current stackful coroutine. Once the operation is complete, resume and return the handler arguments | |
-| [as_tuple(token)](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/experimental__as_tuple.html) | Initiate as indicated by the supplied `token` | Modify the completion arguments to be a single tuple of all arguments passed to the completion handler. For example, `void(error_code, size_t)` becomes `void(tuple)`. In practical terms this token ensures that partial success can be communicated through `use_future`, `use_awaitable` and `yield`| Very useful when used with `yield`, `use_future` or `use_awaitable` if we'd like to handle the error without exception handling or when a partial success must be detected. For example, the error_code might contain `eof` but `size` might indicate that 20 bytes were read into the buffer prior to detecting the end of stream condition. |
-| [redirect_error(token, &ec)](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/redirect_error.html) | Initiate as indicated by the supplied `token` | For operations whose first completion handler argument is an `error_code`, modify the completion handler argument list to remove this argument. For example, `void(error_code, size_t)` becomes `void(size_t)`. The error code is redirected to object referenced by `ec`| Similar to the above use, but allows overwriting the same `error_code` object which can be more elegant in a coroutine containing multiple calls to different initiating functions. |
-| [experimental::as_single(token)](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/experimental__as_single.html) | Initiate as indicated by the supplied `token` | Similar to `as_tuple` except in the case where the only argument to the completion handler is an error. In this case, the completion handler arguments are unaltered. | Experience of use suggests to me that this token is less useful than `redirect_error` and `as_tuple`. |
-| [experimental::append(token, values...)](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/experimental__append.html) | Initiate as indicated by the supplied `token` | When the completion handler is invoked, the `values...` arguments are appended to the argument list. | Provides a way of attaching more information to a completion handler invocation. [examples](https://github.com/madmongo1/blog-2021-10/blob/master/append_prepend.cpp)|
-| [experimental::prepend(token, values...)](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/experimental__prepend.html) | Initiate as indicated by the supplied `token` | When the completion handler is invoked, the `values...` arguments are prepended to the argument list. | Provides a way of attaching more information to a completion handler invocation. [examples](https://github.com/madmongo1/blog-2021-10/blob/master/append_prepend.cpp) |
-
-# A Custom Completion Token
-
-All very interesting and useful, no doubt. But what if we wanted to do something more clever?
-
-The other day I was speaking to Chris about timed cancellation. Now there are ways of doing timed cancellation that in
-Chris' view are correct and maximally performant (covered in [this video](https://www.youtube.com/watch?v=hHk5OXlKVFg)).
-However many users don't need maximum performance. What they often want is maximum teachability or maintainability.
-
-So I posed the question: "Imagine I wanted a function which took any existing Asio composed asynchronous operation and
-produced a new composed operation which represented that same operation with a timeout. How would I do that?"
-
-For example, imagine we had a deferred read operation:
-
-```c++
- auto read_op = async_read(stream, buffer, deferred);
-```
-
-Which we can invoke in a coroutine like so:
-
-```c++
- co_await read_op(use_awaitable);
-```
-
-imagine we could write:
-
-```c++
- co_await with_timeout(read_op, 5s, use_awaitable);
-```
-
-or to write it out in full:
-
-```c++
- co_await with_timeout(
- async_read(stream, buffer, deferred),
- 5s,
- use_awaitable);
-```
-
-The answer that came back was to me quite surprising: "It starts with a completion token".
-
-Which means that the way to achieve this is to write the `with_timeout` function in terms of a composition of completion
-tokens:
-
-```c++
-template
-auto with_timeout(Op op, std::chrono::milliseconds timeout, CompletionToken&& token)
-{
- return std::move(op)(timed(timeout, std::forward(token)));
-}
-```
-
-In the above code, `timed` is a function that returns a parameterised completion token. It will look something like this:
-```c++
-template
-timed_token
-timed(std::chrono::milliseconds timeout, CompletionToken&& token)
-{
- return timed_token{ timeout, token };
-}
-```
-The actual token type would look like this:
-```c++
-template
-struct timed_token
-{
- std::chrono::milliseconds timeout;
- CompletionToken& token;
-};
-```
-
-So far, so simple. But how will this work?
-
-Well, remember that a completion token controls the injection of logic around an asynchronous operation. So somehow by
-writing the token, we will get access to the packaged operation prior to it being initiated and we get access to the
-following building blocks of the async operation provided by Asio's initiation pattern:
-- The _initiation_ - this is a function object that will actually initiate the packaged asynchronous operation, and
-- The _initiation arguments_ - the arguments that were supplied to the initial initiation function. In our example above,
-these would be `stream` and `buffer`
-
-Note that the _initiation_ is an object that describes how to launch the underlying asynchronous operation, plus
-associated data such as the [_associated executor_](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/get_associated_executor.html),
-[_associated allocator_](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/get_associated_allocator.html)
-and [_associated cancellation slot_](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/get_associated_cancellation_slot.html).
-
-In Asio, the customisation point for initiating an asynchronous operation with a given completion token is the template
-class [`async_result`](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/async_result.html).
-
-Here is the specialisation:
-```c++
-// Specialise the async_result primary template for our timed_token
-template
-struct asio::async_result<
- timed_token, // specialised on our token type
- Signatures...>
-{
- // The implementation will call initiate on our template class with the
- // following arguments:
- template
- static auto initiate(
- Initiation&& init, // This is the object that we invoke in order to
- // actually start the packaged async operation
- timed_token t, // This is the completion token that
- // was passed as the last argument to the
- // initiating function
- InitArgs&&... init_args) // any more arguments that were passed to
- // the initiating function
- {
- // we will customise the initiation through our token by invoking
- // async_initiate with our own custom function object called
- // timed_initiation. We will pass it the arguments that were passed to
- // timed(). We will also forward the initiation created by the underlying
- // completion token plus all arguments destined for the underlying
- // initiation.
- return asio::async_initiate(
- timed_initiation{},
- t.token, // the underlying token
- t.timeout, // our timeout argument
- std::forward(init), // the underlying operation's initiation
- std::forward(init_args)... // that initiation's arguments
- );
- }
-};
-```
-
-It's a bit of a wall of text, but most of that is due to my comments and C++'s template syntax. In a nutshell, what this
-class is doing is implementing the function which wraps the initiation of the underlying operation (i.e. the async_read)
-in an outer custom initiation which is going to add a timeout feature.
-
-All that remains is to define and implement `timed_initiation<>`, which is nothing more than a function object. We could
-have written it inline as a lambda, but for clarity it has been broken out into a separate template.
-
-[`async_initiate`](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/async_initiate.html) looks
-complicated but in actual fact is doing a simple transformation:
-
-Given:
-- `tok` is a _CompletionToken_
-- `Signatures...` is a type pack of function signatures that are required to be supported by a _CompletionHandler_ built
-from `tok`.
-- `initiation` is a function object
-- `args...` is a set of arbitrary arguments
-
-`async_initiate` is a helper function which calls `async_result<>::initiate()`. Calling this will first transform `tok`
-into a _CompletionHandler_ which we will call `handler`. Then it will simply call `initiation(handler, args...)`. i.e.
-it will invoke the `initiation` with the correct completion handler and any other arguments we happen to give it.
-
-```c++
-// Note: this is merely a function object - a lambda.
-template
-struct timed_initiation
-{
- template <
- typename CompletionHandler,
- typename Initiation,
- typename... InitArgs>
- void operator()(
- CompletionHandler handler, // the generated completion handler
- std::chrono::milliseconds timeout, // the timeout specified in our completion token
- Initiation&& initiation, // the embedded operation's initiation (e.g. async_read)
- InitArgs&&... init_args) // the arguments passed to the embedded initiation (e.g. the async_read's buffer argument etc)
- {
- using asio::experimental::make_parallel_group;
-
-    // locate the correct executor associated with the underlying operation
- // first try the associated executor of the handler. If that doesn't
- // exist, take the associated executor of the underlying async operation's handler
- // If that doesn't exist, use the default executor (system executor currently)
- auto ex = asio::get_associated_executor(handler,
- asio::get_associated_executor(initiation));
-
- // build a timer object and own it via a shared_ptr. This is because its
- // lifetime is shared between two asynchronous chains. Use the handler's
- // allocator in order to take advantage of the Asio recycling allocator.
- auto alloc = asio::get_associated_allocator(handler);
- auto timer = std::allocate_shared(alloc, ex, timeout);
-
- // launch a parallel group of asynchronous operations - one for the timer
- // wait and one for the underlying asynchronous operation (i.e. async_read)
- make_parallel_group(
- // item 0 in the group is the timer wait
- asio::bind_executor(ex,
- [&](auto&& token)
- {
- return timer->async_wait(std::forward(token));
- }),
- // item 1 in the group is the underlying async operation
- asio::bind_executor(ex,
- [&](auto&& token)
- {
- // Finally, initiate the underlying operation
- // passing its original arguments
- return asio::async_initiate(
- std::forward(initiation), token,
- std::forward(init_args)...);
- })
- ).async_wait(
- // Wait for the first item in the group to complete. Upon completion
- // of the first, cancel the others.
- asio::experimental::wait_for_one(),
-
- // The completion handler for the group
- [handler = std::move(handler), timer](
- // an array of indexes indicating in which order the group's
- // operations completed, whether successfully or not
- std::array,
-
- // The arguments are the result of concatenating
- // the completion handler arguments of all operations in the
- // group, in retained order:
- // first the steady_timer::async_wait
- std::error_code,
-
- // then the underlying operation e.g. async_read(...)
- auto... underlying_op_results // e.g. error_code, size_t
- ) mutable
- {
- // release all memory prior to invoking the final handler
- timer.reset();
- // finally, invoke the handler with the results of the
- // underlying operation
- std::move(handler)(std::move(underlying_op_results)...);
- });
- }
-};
-```
-
-Now that the token and its specialisation of `async_result` is complete, we can trivially write a timed read from console
-that won't throw as a coroutine in one line:
-
-```c++
- // using the completion token direct
- auto [ec1, n1] = co_await async_read_until(in, dynamic_buffer(line), '\n',
- as_tuple(timed(5s, use_awaitable)));
-
- // using the function form
- auto [ec2, n2] = co_await with_timeout(
- async_read_until(in, asio::dynamic_buffer(line), '\n', deferred),
- 5s,
- as_tuple(use_awaitable));
-```
-
-The full code for this example is [here](https://github.com/madmongo1/blog-2021-10/blob/master/timed.cpp).
-Note that this example is written in terms of a posix console stream.
-To demonstrate on Windows, you would need to replace the `posix::stream_descriptor in(co_await this_coro::executor, ::dup(STDIN_FILENO));`
-with a stream type compatible with Windows, such as a socket or named pipe... or even adapt the example to use a Beast
-`http::async_read` - and presto! You have a ready-made HTTP server which applies a timeout to reading messages.
-
-Update 2021-10-11: I have since modified the example so that on windows a local tcp socket pair is created and a
-coroutine is spawned to handle the input side of things. The demo now compiles and runs with MSVC2019.
-
-# A Note on Performance
-
-It is important that I point out that this example token has been written with ease of use as the primary motivating
-factor. There is a pessimisation in its design in that use of the token allocates a new timer for every
-asynchronous operation where the timeout is being applied. This of course becomes completely unnecessary if we redesign
-the token so that we pass a reference to an existing timer to its construction function.
-
-The call-site would then look more like this:
-```c++
- auto timer = asio::steady_timer(co_await this_coro::executor, 5s);
- auto [ec1, n1] = co_await async_read_until(in, dynamic_buffer(line), '\n',
- as_tuple(use_deadline(timer, use_awaitable)));
-```
-
-Writing it this way would actually result in a simpler initiation and would ensure that the general Asio principle of
-giving the caller control over object lifetimes and allocation strategies is maintained.
-
-Another way to avoid repeated allocations of the timer while retaining the "easy mode" interface is to make use of
-Asio's execution context service facility. In this way timers would be cached in the service object associated with the
-associated executor's [`execution_context`](https://www.boost.org/doc/libs/1_77_0/doc/html/boost_asio/reference/execution_context.html).
-
-Asio was originally designed for highly scalable and latency-sensitive applications such as used in the finance,
-automotive and defence industries. Out of the box it provides the basic building blocks with which to assemble
-performance and memory-critical applications. However as it has become more widely adopted there is a growing demand for
-"easy mode" interfaces for people who just want to get things done.
-
-This message has not gone unheard. I would expect a number of interesting new features to be added to the library in
-short order.
-
-Thanks for reading.
-
-Richard Hodges
-for C++ Alliance
-[hodges.r@gmail.com](mailto:hodges.r@gmail.com)
diff --git a/_posts/2022-03-12-RFRM-on-CppCast.md b/_posts/2022-03-12-RFRM-on-CppCast.md
deleted file mode 100644
index 8c5611da9..000000000
--- a/_posts/2022-03-12-RFRM-on-CppCast.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: rene
-title: CppCast; New C++ Scope and Debugging Support
-author-id: rene
----
-
-This week I got the opportunity to chat with Rob and Jason about the
-[open letters](https://github.com/grafikrobot/cpp_scope) to the C++ committee
-I wrote on [CppCast](https://cppcast.com/standard-tooling-debugging/).
-The letters propose to expand the scope of the C++ standards work to include
-the ecosystem of tools and related technologies.
diff --git a/_posts/2022-08-10-RichardsAugustUpdate.md b/_posts/2022-08-10-RichardsAugustUpdate.md
deleted file mode 100644
index 1d2d950a3..000000000
--- a/_posts/2022-08-10-RichardsAugustUpdate.md
+++ /dev/null
@@ -1,677 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: richard
-title: Richard's August Update
-author-id: richard
----
-
-# Beast and HTTP Redirect
-
-Some months ago, I was asked how to handle HTTP redirect responses in beast.
-Characteristically, I took a moment to model how I would do that in my head, waved my hands and kind of explained it and
-that was that.
-
-Then more recently, someone else asked how beast websockets would handle a redirect response when performing a websocket
-handshake. Now I'm pretty sure that websocket clients have no requirement at all to follow redirects. I believe the
-WebSocket specification does not allow for such things, but I thought it would be an interesting exercise to cover the
-topic and provide a working code example and cover it in a blog post.
-
-There are a few reasons I decided to do this:
-- Redirects are going to be important for any client-side framework written on Beast.
-- There are a few new features in Asio which I thought it would be interesting to showcase.
-
-## Code Repository
-
-The code for this blog can be found [here](https://github.com/madmongo1/blog-2022-Aug-websock-redirect).
-I have tested it on Fedora 36 and GCC-12. The code requires at least boost-1.80.0.beta1, because it takes advantage of
-the new change in Asio, which allows the deferred object returned by the `asio::deferred` completion token to be
-directly `co_await`ed. This provides a significant improvement in performance for operations that don't need the full
-functionality of the `asio::awaitable<>` type.
-
-## Handling a Redirect - General Case
-
-Redirects can be followed with the following generalised algorithm:
-
-```
-
-set redirect count to 0
-
-while not connected, no egregious errors and redirect limit has not been exceeded:
-
- crack URL
- resolve the FQDN of the host specified in the URL
- connect to the host
-
- if URL indicates HTTPS:
- negotiate TLS
- endif
-
- send http request (upgrade request for websocket)
-
- await response
-
- if response is 200ish:
- exit success
- elseif response is redirect:
- increment redirect count
- update URL with data found in Location header field
- continue
- else
- exit error
- endif
-
-endwhile
-
-```
-
-## Handling a Redirect in C++ with Beast
-
-It turns out that the entire process can be handled in one coroutine.
-Now remember that an HTTP connection can redirect to an HTTPS connection. So the "connection" type returned from a
-coroutine that creates a connection, having taken into account any redirects, must handle both transport types.
-
-It's worth mentioning at this point that if you're writing for a modern Linux kernel, TLS is now supported natively
-by the berkley sockets interface. This means that programs need no longer generate one code path for SSL and one for
-TCP. If this is interesting to you, there is some documentation [here](https://www.kernel.org/doc/html/v5.12/networking/tls.html).
-When I get a moment I will create a modified copy of this program that uses Kernel TLS. However, for now, we do it the
-old-fashioned portable way.
-
-### A connection abstraction
-
-First we define some useful types for our variant implementation
-```cpp
-struct websock_connection
-{
- using ws_stream = beast::websocket::stream< tcp::socket >;
- using wss_stream = beast::websocket::stream< ssl::stream< tcp::socket > >;
- using var_type = boost::variant2::variant< ws_stream, wss_stream >;
-```
-...provide TLS and SSL constructors...
-
-```cpp
- websock_connection(tcp::socket sock)
- : var_(ws_stream(std::move(sock)))
- {
- }
-
- websock_connection(ssl::stream< tcp::socket > stream)
- : var_(wss_stream(std::move(stream)))
- {
- }
-```
-
-...provide access to the underlying (optional) SSL and TCP streams...
-
-```cpp
- tcp::socket &
- sock();
-
- ssl::stream< tcp::socket > *
- query_ssl();
-
-```
-
-... provide functions that return awaitables for the high level functions we will need...
-
-```cpp
- asio::awaitable< void >
- try_handshake(error_code &ec,
- beast::websocket::response_type &response,
- std::string hostname,
- std::string target);
-
- asio::awaitable< std::size_t >
- send_text(std::string const &msg);
-
- asio::awaitable< std::string >
- receive_text();
-
- asio::awaitable< void >
- close(beast::websocket::close_reason const &reason);
-
-```
-...and finally the implementation details...
-```cpp
- var_type var_;
- beast::flat_buffer rxbuffer_;
-};
-```
-The implementation of the various member functions are then all defined in terms of `visit`, e.g.:
-
-```cpp
-asio::awaitable< std::size_t >
-websock_connection::send_text(std::string const &msg)
-{
- using asio::use_awaitable;
-
- return visit(
- [&](auto &ws)
- {
- ws.text();
- return ws.async_write(asio::buffer(msg), use_awaitable);
- },
- var_);
-}
-```
-Note that this function is not actually a coroutine. Since it doesn't maintain any state during the async operation,
-the function can simply return the `awaitable` to the calling coroutine. This saves the creation of a coroutine frame
-when we don't need it.
-
-The interface and implementation for this class can be found in `websocket_connection.[ch]pp` in the git repo linked
-above.
-
-### Moveable `ssl::stream`?
-
-You may have noticed something in this constructor:
-```cpp
- websock_connection(ssl::stream< tcp::socket > stream)
- : var_(wss_stream(std::move(stream)))
- {
- }
-```
-I have `std::move`'d the ssl stream into the WebSocket stream. Until a few versions ago, asio ssl streams were not
-moveable, which caused all kinds of issues when wanting to, for example, upgrade an SSL stream connection to a secure
-websocket stream.
-
-The Beast library has two workarounds for this:
-1. Beast provides its own version of ssl::stream, and
-2. `beast::websocket::stream` has a specialisation defined which holds a reference to a stream.
-
-These are probably now unnecessary and could arguably be deprecated.
-
-## The algorithm in C++20
-
-```cpp
-asio::awaitable< std::unique_ptr< websock_connection > >
-connect_websock(ssl::context &sslctx,
- std::string urlstr,
- int const redirect_limit = 5)
-{
- using asio::experimental::deferred;
-
- // for convenience, take a copy of the current executor
- auto ex = co_await asio::this_coro::executor;
-
- // number of redirects detected so far
- int redirects = 0;
-
-    // build a resolver in order to decode the FQDNs in URLs
- auto resolver = tcp::resolver(ex);
-
- // in the case of a redirect, we will resume processing here
-again:
- fmt::print("attempting connection: {}\n", urlstr);
-
- // decode the URL into components
- auto decoded = decode_url(urlstr);
-```
-This part of the code builds a unique pointer to an initialised `websocket_connection` object, initialised with either
-an SSL stream or a TCP stream as indicated by the result of cracking the URL. For brevity I have used a regex to crack
-the URL, but you should check out Vinnie Falco's new Boost.URL candidate library [here](https://github.com/CPPAlliance/url).
-Vinnie will be looking for reviewers during this library's submission to Boost later this month, so do keep an eye out
-in the Boost mailing list.
-
-```cpp
- // build the appropriate websocket stream type depending on whether the URL
- // indicates a TCP or TLS transport
- auto result = decoded.transport == transport_type::tls
- ? std::make_unique< websock_connection >(
- ssl::stream< tcp::socket >(ex, sslctx))
- : std::make_unique< websock_connection >(tcp::socket(ex));
-
-```
-
-Here we are awaiting a connect operation with the result of awaiting a resolve operation. Note the use of
-`asio::experimental::deferred`. `deferred` is quite a versatile completion token which can be used to:
-- return an lightweight awaitable, as demonstrated here,
-- return a function object which may be later called multiple times with another completion handler;
- effectively creating a curried initiation,
-- be supplied with a completion handler up front in order to create a deferred sequence of chained asynchronous
- operations; allowing simple composed operations to be built quickly and easily.
-
-```cpp
- // connect the underlying socket of the websocket stream to the first
- // reachable resolved endpoint
- co_await asio::async_connect(
- result->sock(),
- co_await resolver.async_resolve(
- decoded.hostname, decoded.service, deferred),
- deferred);
-```
-
-In the case that the endpoint we are connecting to is secure, we must do the SSL/TLS handshake:
-
-```cpp
- // if the connection is TLS, we will want to update the hostname
- if (auto *tls = result->query_ssl(); tls)
- {
- if (!SSL_set_tlsext_host_name(tls->native_handle(),
- decoded.hostname.c_str()))
- throw system_error(
- error_code { static_cast< int >(::ERR_get_error()),
- asio::error::get_ssl_category() });
- co_await tls->async_handshake(ssl::stream_base::client, deferred);
- }
-
- // some variables to receive the result of the handshake attempt
- auto ec = error_code();
- auto response = beast::websocket::response_type();
-```
-The function try_handshake simply initiates the form of websocket handshake operation which preserves the
-http response returned from the server. We will need this in case the websocket connection response is actually a
-redirect.
-
-```cpp
- // attempt a websocket handshake, preserving the response
- fmt::print("...handshake\n");
- co_await result->try_handshake(
- ec, response, decoded.hostname, decoded.path_etc);
-
- // in case of error, we have three scenarios, detailed below:
- if (ec)
- {
- fmt::print("...error: {}\n{}", ec.message(), stitch(response.base()));
- auto http_result = response.result_int();
- switch (response.result())
- {
-```
-
-And here is the code that handles the actual redirect. Note that in this simplistic implementation, I am replacing the
-URL with the `Location` field in the web server's response. In reality, the returned URL could be a relative URL which
-would need to be merged into the original URL. [Boost.URL](https://github.com/CPPAlliance/url) handles this nicely.
-Once that library is available I'll upgrade this example.
-
-```cpp
- case beast::http::status::permanent_redirect:
- case beast::http::status::temporary_redirect:
- case beast::http::status::multiple_choices:
- case beast::http::status::found:
- case beast::http::status::see_other:
- case beast::http::status::moved_permanently:
- //
- // Scenario 1: We have been redirected
- //
- if (response.count(beast::http::field::location))
- {
- if (++redirects <= redirect_limit)
- {
- // perform the redirect by updating the URL and jumping to
- // the goto label above.
- auto &loc = response[beast::http::field::location];
- urlstr.assign(loc.begin(), loc.end());
- goto again;
- }
- else
- {
- throw std::runtime_error("too many redirects");
- }
- }
- else
- {
- //
- // Scenario 2: we have some other HTTP response which is not an
- // upgrade
- //
- throw system_error(ec,
- stitch("malformed redirect\r\n", response));
- }
- break;
-
- default:
- //
- // Scenario 3: Some other transport error
- //
- throw system_error(ec, stitch(response));
- }
- }
- else
- {
- //
- // successful handshake
- //
- fmt::print("...success\n{}", stitch(response.base()));
- }
-
- co_return result;
-}
-```
-
-So with that written, all we need to do is write a simple coroutine to connect, chat and disconnect in order to test:
-
-```cpp
-asio::awaitable< void >
-comain(ssl::context &sslctx, std::string initial_url)
-{
- auto connection = co_await connect_websock(sslctx, initial_url, 6);
- co_await echo(*connection, "Hello, ");
- co_await echo(*connection, "World!\n");
- co_await connection->close(beast::websocket::close_reason(
- beast::websocket::close_code::going_away, "thanks for the chat!"));
- co_return;
-}
-```
-
-## A Simple Http/WebSocket Server
-
-In order to test this code, I put together a super-simple web server, which is included in the repo and run as part of
-the demo program.
-
-This web server runs two coroutines, each with its own acceptor. One is the acceptor for HTTP/WS connections and the other
-is for HTTPS/WSS connections. Of course I could have used beast's
-[flex helper](https://www.boost.org/doc/libs/1_79_0/libs/beast/doc/html/beast/ref/boost__beast__async_detect_ssl.html)
-to auto-deduce WS/WSS on the same port, but I wanted to keep the implementation as simple as possible.
-
-The HTTP server is very simple. All it does is redirect the caller to the same `Target` on the WSS server:
-
-```cpp
-asio::awaitable< void >
-serve_http(tcp::socket sock, std::string https_endpoint)
-{
- using asio::experimental::deferred;
-
- auto rxbuf = beast::flat_buffer();
- auto parser = beast::http::request_parser< beast::http::empty_body >();
- co_await beast::http::async_read(sock, rxbuf, parser, deferred);
-
- static const auto re = std::regex("(/websocket-\\d+)(/.*)?",
- std::regex_constants::icase |
- std::regex_constants::optimize);
- auto match = std::cmatch();
- auto &request = parser.get();
- if (std::regex_match(
- request.target().begin(), request.target().end(), match, re))
- {
- co_await send_redirect(
- sock, fmt::format("{}{}", https_endpoint, match[0].str()));
- }
- else
- {
- co_await send_error(
- sock,
- beast::http::status::not_found,
- fmt::format("resource {} is not recognised\r\n",
- std::string_view(request.target().data(),
- request.target().size())));
- }
-}
-```
-
-The WSS server is minutely more complex. It looks for a URL of the form `/websocket-(\d+)(/.*)?` where group 1 is the
-"index number" of the request. If the index number is 0, the websocket request is accepted and we head off into a chat
-coroutine for the remainder of the connection. If it is non-zero, then the index is decremented, the URL is
-reconstructed with the new index, and the redirect response is sent back.
-
-So if for example you requested `http://some-server/websocket-2/bar`, you would be redirected along the following path:
-- `https://some-server/websocket-2/bar` (first http to https transition)
-- `https://some-server/websocket-1/bar`
-- `https://some-server/websocket-0/bar` (websocket handshake accepted on this URL)
-
-Here's the code:
-
-```cpp
-asio::awaitable< void >
-serve_https(ssl::stream< tcp::socket > stream, std::string https_fqdn)
-{
- try
- {
- using asio::experimental::deferred;
-
- co_await stream.async_handshake(ssl::stream_base::server, deferred);
-
- auto rxbuf = beast::flat_buffer();
- auto request = beast::http::request< beast::http::string_body >();
- co_await beast::http::async_read(stream, rxbuf, request, deferred);
-
- auto &sock = stream.next_layer();
- if (beast::websocket::is_upgrade(request))
- {
- static const auto re = std::regex(
- "/websocket-(\\d+)(/.*)?",
- std::regex_constants::icase | std::regex_constants::optimize);
- auto match = std::cmatch();
- if (std::regex_match(request.target().begin(),
- request.target().end(),
- match,
- re))
- {
- auto index = ::atoi(match[1].str().c_str());
- if (index == 0)
- {
- auto wss =
- beast::websocket::stream< ssl::stream< tcp::socket > >(
- std::move(stream));
- co_await wss.async_accept(request, deferred);
- co_await run_echo_server(wss, rxbuf);
- // serve the websocket
- }
- else
- {
- // redirect to the next index down
- auto loc = fmt::format("{}/websocket-{}{}",
- https_fqdn,
- index - 1,
- match[2].str());
- co_await send_redirect(stream, loc);
- }
- }
- else
- {
- co_await send_error(stream,
- beast::http::status::not_found,
- "try /websocket-5\r\n");
- }
- }
- else
- {
- co_await send_error(
- stream,
- beast::http::status::not_acceptable,
- "This server only accepts websocket requests\r\n");
- }
- }
- catch (system_error &e)
- {
- fmt::print("serve_https: {}\n", e.code().message());
- }
- catch (std::exception &e)
- {
- fmt::print("serve_https: {}\n", e.what());
- }
-```
-
-The `run_echo_server` coroutine is about as simple as it gets. Note the use of `deferred` as a completion token in order
-to create the lightweight awaitable type.
-
-```cpp
-asio::awaitable< void >
-run_echo_server(beast::websocket::stream< ssl::stream< tcp::socket > > &wss,
- beast::flat_buffer &rxbuf)
-{
- using asio::experimental::deferred;
-
- for (;;)
- {
- auto size = co_await wss.async_read(rxbuf, deferred);
- auto data = rxbuf.cdata();
- co_await wss.async_write(data, deferred);
- rxbuf.consume(size);
- }
-}
-```
-
-## An Example of Cancellation
-
-The server is trivial, but there is one little feature I wanted to demonstrate.
-
-The purpose of the demo is:
-- spin up a web server
-- connect to the web server a few times and have a chat with it
-- exit the program
-
-This then leaves the issue of causing the web server to shut down so as to release its ownership of the underlying
-io_context run operation. i.e. if the io_context doesn't run out of work, the call to `io_context::run()` won't return.
-
-I have taken advantage of the fact that when coroutines are spawned with an associated cancellation slot, the
-cancellation slot tree propagates down through all child coroutines and asio operations.
-
-So it becomes as simple as:
-
-Define a cancellation signal:
-
-```cpp
- auto stop_sig = asio::cancellation_signal();
-```
-
-Run the server, passing in the cancellation signal's slot:
-```cpp
- svr.run(stop_sig.slot());
-```
-
-When the client code has completed, it simply needs to cause the signal to emit:
-
-```cpp
- co_spawn(ioc,
- comain(ioctx, initial_url),
- [&](std::exception_ptr ep)
- {
- ```
-We emit the signal regardless of whether the client ended in an error or not - we want to stop the server in either case
-```cpp
- stop_sig.emit(asio::cancellation_type::all);
- try
- {
- if (ep)
- std::rethrow_exception(ep);
- }
- catch (std::exception &e)
- {
- fmt::print("client exception: {}\n", e.what());
- }
- });
-
-```
-
-Within the server, we spawn the internal coroutines bound to the cancellation slot. This will cause the slot to
-propagate the signal into the subordinate coroutines, causing whatever they are doing to complete with an
-`operation_aborted` error.
-
-```cpp
-void
-server::run(asio::cancellation_slot stop_slot)
-{
-```
-`awaitable_operators` makes dealing with parallel coroutines extremely simple.
-```cpp
- using namespace asio::experimental::awaitable_operators;
- using asio::bind_cancellation_slot;
- using asio::co_spawn;
- using asio::use_awaitable;
-
- fmt::print("server starting\n");
-
- auto handler = [](std::exception_ptr ep)
- {
- try
- {
- if (ep)
- std::rethrow_exception(ep);
- }
- catch (asio::multiple_exceptions &es)
- {
- print_exceptions(es.first_exception());
- }
- catch (std::exception &e)
- {
- print_exceptions(e);
- }
- };
-```
-
-Here we are creating an outer coroutine which represents the simultaneous execution of the two inner coroutines,
-`http_server` and `wss_server`. The completion token of this outer coroutine is bound to the supplied cancellation slot.
-When this slot is invoked, it will propagate the signal into the two subordinate coroutines.
-
-```cpp
- co_spawn(get_executor(),
- http_server(tcp_acceptor_, tls_root_) &&
- wss_server(sslctx_, tls_acceptor_, tls_root_),
- bind_cancellation_slot(stop_slot, handler));
-}
-```
-
-## Final output
-
-Here is an example of the output generated by this program, tracking the various redirects and correct shutdown of all
-IO operations.
-
-```text
-$ ~/github/madmongo1/blog-2022-Aug-websock-redirect/cmake-build-debug/blog-2022-aug-websock-redirect
-Initialising
-server starting
-attempting connection: ws://127.0.0.1:38503/websocket-4
-...handshake
-...error: The WebSocket handshake was declined by the remote peer
-HTTP/1.1 301 Moved Permanently
-Location: wss://127.0.0.1:45141/websocket-4
-Connection: close
-Content-Length: 54
-
-attempting connection: wss://127.0.0.1:45141/websocket-4
-...handshake
-...error: The WebSocket handshake was declined by the remote peer
-HTTP/1.1 301 Moved Permanently
-Location: wss://127.0.0.1:45141/websocket-3
-Connection: close
-Content-Length: 54
-
-attempting connection: wss://127.0.0.1:45141/websocket-3
-...handshake
-...error: The WebSocket handshake was declined by the remote peer
-HTTP/1.1 301 Moved Permanently
-Location: wss://127.0.0.1:45141/websocket-2
-Connection: close
-Content-Length: 54
-
-attempting connection: wss://127.0.0.1:45141/websocket-2
-...handshake
-...error: The WebSocket handshake was declined by the remote peer
-HTTP/1.1 301 Moved Permanently
-Location: wss://127.0.0.1:45141/websocket-1
-Connection: close
-Content-Length: 54
-
-attempting connection: wss://127.0.0.1:45141/websocket-1
-...handshake
-...error: The WebSocket handshake was declined by the remote peer
-HTTP/1.1 301 Moved Permanently
-Location: wss://127.0.0.1:45141/websocket-0
-Connection: close
-Content-Length: 54
-
-attempting connection: wss://127.0.0.1:45141/websocket-0
-...handshake
-...success
-HTTP/1.1 101 Switching Protocols
-Upgrade: websocket
-Connection: upgrade
-Sec-WebSocket-Accept: N5wCr5WUOM6LxN8I4If7oR8QW3A=
-Server: Boost.Beast/330
-
-Hello, World!
-serve_https: The WebSocket stream was gracefully closed at both endpoints
-http_server: Operation canceled
-wss_server: Operation canceled
-Finished
-
-Process finished with exit code 0
-```
-
-# Final Note
-
-I have of course cut many corners in this demonstration. The error handling is a bit ropey and I haven't considered
-timeouts, connection re-use, etc.
-
-But hopefully this will be useful to anyone reading.
-
-Until next time.
diff --git a/_posts/2022-11-16-KlemensBoost181.md b/_posts/2022-11-16-KlemensBoost181.md
deleted file mode 100644
index c19101182..000000000
--- a/_posts/2022-11-16-KlemensBoost181.md
+++ /dev/null
@@ -1,296 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: boost-release
-title: New in Boost 1.81
-author-id: klemens
----
-
-# New Library: Url
-
-Boost.url has been released. And it's [awesome](https://www.boost.org/doc/libs/master/libs/url/doc/html/url/overview.html).
-
-# Json & Describe
-
-Boost.json is now integrated with Boost.describe. That means that any `class`, `struct`, or `enum`
-that has describe annotations can be directly serialized to and from json.
-
-You can find examples [here](https://www.boost.org/doc/libs/master/libs/describe/doc/html/describe.html#example_to_json).
-
-Additionally, `variant2` is also supported.
-
-# Unordered
-
-Unordered got a new map type, [`unordered_flat_map`](https://www.boost.org/doc/libs/master/libs/unordered/doc/html/unordered.html#unordered_flat_map),
-and the corresponding set type [`unordered_flat_set`](https://www.boost.org/doc/libs/master/libs/unordered/doc/html/unordered.html#unordered_flat_set).
-
-These two containers lay out the map in a flat array, instead of being node-based.
-On modern CPU, this open addressing can lead to significant performance increases,
-due to improved cache-usage.
-
-Joaquín, the author, has written a detailed [blog post](https://bannalia.blogspot.com/2022/11/inside-boostunorderedflatmap.html),
-which is highly recommended.
-
-# Beast
-
-## Per operation cancellation
-
-Beast supports per-operation cancellation now. This is mostly `terminal` cancellation,
-i.e. you can't do anything with the io-object but to close it afterwards.
-This is still useful for completion methods that automatically wire up cancellations, such as `asio::awaitable`s (for which beast also has examples).
-
-In a few cases beast does allow `total` cancellation (cancellation without side effects).
-This is the case in certain situations with websockets, when the operation gets blocked because of ongoing control messages such as ping or pong.
-
-Generally it should however be treated as if beast only supports `terminal` cancellation due to the protocol limitations.
-
-
-## Addressing the dynamic-buffer disconnect.
-
-When beast was originally conceived, asio did not have a clear dynamic-buffer concept.
-This led to beast developing its own buffer types in parallel, which have very close semantics.
-Asio however went one step further under the guidance of WG21, and developed a dynamic buffer version 2,
-which is much more complicated and a questionable improvement.
-
-Since asio still supports dynamic buffer v1, unless explicitly told not to, it was little work to
-make them compatible.
-The major difference is that `asio`'s buffers are passed by copy, while `beast`'s need to be passed by reference.
-
-```cpp
-std::string buffer; // < the dynamic buffer will point there
-asio::read_until(socket, asio::dynamic_buffer(buffer), '\n');
-```
-
-
-This surely was the source of many bugs, as the following code compiles fine:
-
-```cpp
-beast::flat_buffer buffer;
-asio::read_until(socket, buffer, '\n');
-```
-
-When run however, the buffer seems to be empty. The reason is that the buffer gets copied by `read_until`,
-meaning the data gets written into a buffer, that will get destroyed.
-
-To help with that, beast now provides a `buffer_ref` class that captures by reference and can then freely be copied:
-
-```cpp
-beast::flat_buffer buffer;
-asio::read_until(socket, ref(buffer), '\n');
-```
-
-`ref` is a function to do the proper template resolution.
-
-# Asio
-
-## Semantic changes
-
-Asio's semantic requirements have changed slightly regarding `post` and executors.
-When a composed operation runs into an error before it's first op,
-a common pattern is to `post` once, to avoid recursion.
-Usually this post will happen on the executor of the completion handler,
-since this is the handler that we need to invoke the handler on anyhow.
-
-```cpp
-void run_my_op(
- tcp::socket & sock, // io_exec
- thread_pool::executor_type work_exec,
- std::function my_completion)
-{
- async_read_my_message(sock, asio::bind_executor(work_exec, my_completion));
-}
-
-```
-
-In the above code `async_read_my_message` is a composed operation that gets one message from the socket, which runs on `io_exec` and is supposed to invoke the completion on `work_exec`.
-
-Let's say, our `async_read_my_message` op, checks if `sock.is_open` and if not, wants to immediately complete.
-This seems ok-ish, but what happens if the `io_exec` isn't running? In any other case,
-the operation will only complete if `io_exec` is running, except for the early error.
-Thus the correct executor to `post` to is `io_exec`, after which the completion gets `dispatch`ed to `work_exec`.
-
-Because of this, the executor requirements for the associated executor are now relaxed,
-so that it does not need to support `post`.
-The precise requirements can be found in the documentation for the polymorphic wrappers
-[any_completion_executor](https://www.boost.org/doc/libs/master/doc/html/boost_asio/reference/any_completion_executor.html) and [any_io_executor](https://www.boost.org/doc/libs/master/doc/html/boost_asio/reference/any_io_executor.html).
-
-Additionally, `async_compose` provides a handle (commonly called `self`) with a `get_io_executor()` member.
-
-*Note that beast is not yet compliant with this as of 1.81.*
-
-## `any_completion_handler`
-
-Another interesting addition to asio is the `any_completion_handler` class,
-that can be used to type-erase completion handlers (not to be confused with tokens).
-
-Introducing a minor run-time overhead, it can speed up compile times,
-because heavy async operations can be moved into source files and only built once.
-
-At the same time, it can be wrapped in an async_initiate statement,
-allowing the use of all completion tokens, e.g. `use_awaitable`.
-
-E.g.:
-
-```cpp
-// this goes into the source file
-void my_async_write_impl(asio::ip::tcp::socket & sock,
- asio::const_buffer buffer,
- asio::any_completion_handler cpl)
-{
- asio::async_write(sock, buffer, std::move(cpl));
-}
-
-/// header file
-
-
-template
-auto my_async_write(asio::ip::tcp::socket & sock,
- asio::const_buffer buffer,
- Token && token)
-{
- return asio::async_initiate(
- [&](asio::any_completion_handler cpl)
- {
- my_async_write_impl(sock, buffer, std::move(cpl));
- },
- token);
-}
-
-// use it
-
-co_await my_async_write(my_socket, my_buffer, asio::use_awaitable);
-
-```
-
-Note that the above described semantic changes apply; that is, the associated executor of an `any_completion_handler` cannot be used in `asio::post`.
-
-## Awaiting async_operations
-
-The new version also introduces the concept (actual concept in C++20) of an `async_operation`.
-It describes an expression that can be invoked with a completion-token, e.g.:
-
-```cpp
-asio::steady_timer tim{co_await asio::this_coro::executor};
-asio::async_operation auto my_op =
- [&](auto && token) {return tim.async_wait(std::move(token));}
-```
-
-The interesting part here is that an async-operation, in addition to being usable in `parallel_group`s or the also new `ranged_parallel_group`, can be directly awaited in `asio::awaitable`'s and `asio::experimental::coro`s.
-
-```cpp
-co_await my_op;
-```
-
-The nice thing here is that we can avoid the additional coroutine frame (which includes an allocation),
-that `use_awaitable` (or `use_coro`) needs in order to return an `awaitable`.
-
-Additionally, `experimental::promise` has been refactored, so that it doesn't use a `.async_wait` member function,
-but `operator()` as well. That is, any `experimental::promise` is an async-operation.
-
-```cpp
-auto p = tim.async_wait(experimental::use_promise);
-co_await std::move(p);
-```
-
-## co_compose
-
-Another useful feature for library developers that can use C++20 is the experimental `co_composed`,
-which is a low-level coroutine based replacement for `async_compose`.
-
-Consider the following example from the asio docs:
-
-```cpp
-template
-auto async_echo(tcp::socket& socket,
- CompletionToken&& token)
-{
- return boost::asio::async_initiate<
- CompletionToken, void(boost::system::error_code)>(
- boost::asio::experimental::co_composed(
- [](auto state, tcp::socket& socket) -> void
- {
- state.reset_cancellation_state(
- boost::asio::enable_terminal_cancellation());
-
- while (!state.cancelled())
- {
- char data[1024];
- auto [e1, n1] =
- co_await socket.async_read_some(
- boost::asio::buffer(data),
- boost::asio::as_tuple(boost::asio::deferred));
-
- if (e1)
- co_yield state.complete(e1);
-
- if (!!state.cancelled())
- co_yield state.complete(
- make_error_code(boost::asio::error::operation_aborted));
-
- auto [e2, n2] =
- co_await boost::asio::async_write(socket,
- boost::asio::buffer(data, n1),
- boost::asio::as_tuple(boost::asio::deferred));
-
- if (e2)
- co_yield state.complete(e2);
- }
- }, socket),
- token, std::ref(socket));
-}
-```
-
-Writing this as `async_compose` & `asio::coroutine` would look like this:
-
-{% raw %}
-```cpp
-struct async_echo_implementation : boost::asio::coroutine
-{
- tcp::socket & socket;
-
- // - can't be a member, since this struct gets moved
- // - should be allocated using the associated allocator, but this is an example.
- std::unique_ptr data{new char[1024]};
-
- template
- void operator()(Self && self, system::error_code ec = {}, std::size_t n = 0u)
- {
- reenter(this)
- {
- while (!self.cancelled())
- {
- yield socket.async_read_some(
- boost::asio::buffer(data.get(), 1024),
- std::move(self));
-
- if (ec)
- return self.complete(ec);
- if (!!self.cancelled())
- return self.complete(boost::asio::error::operation_aborted);
-
- yield boost::asio::async_write(socket,
- boost::asio::buffer(data.get(), n),
- std::move(self));
-
- if (ec)
- return self.complete(ec);
- }
- }
- self.complete({});
- }
-};
-
-template
-auto async_echo(tcp::socket& socket,
- CompletionToken&& token)
-{
- return boost::asio::async_compose(
- async_echo_implementation{{}, socket},
- token, socket);
-}
-```
-{% endraw %}
-
-Not only is the state management easier, but it also doesn't need to move the state (i.e. coroutine frame),
-so that it can become more performant.
diff --git a/_posts/2022-12-11-Asio201Deferred.md b/_posts/2022-12-11-Asio201Deferred.md
deleted file mode 100644
index ef34a6a15..000000000
--- a/_posts/2022-12-11-Asio201Deferred.md
+++ /dev/null
@@ -1,236 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: asio
-title: Asio 201 - deferred
-author-id: klemens
----
-
-# Asio deferred
-
-## Async operations
-
-Asio introduced the concept of an async_operation, which describes a primary expression
-that can be invoked with a completion token. In C++20 this is also a language concept.
-
-```cpp
-asio::io_context ctx;
-asio::async_operation auto post_op = [&](auto && token){return asio::post(ctx, std::move(token));};
-
-auto f = post_op(asio::use_future);
-ctx.run();
-f.get(); // void
-```
-
-Async operations can be used in `parallel_group` and directly `co_await`ed in C++20.
-
-## `asio::deferred` as a completion token
-
-Using `asio::deferred` as a completion token, will give you a lazy
-`async_operation` as the result value.
-
-```cpp
-asio::io_context ctx;
-asio::async_operation auto post_op = asio::post(ctx, asio::deferred);
-
-auto f = std::move(post_op)(asio::use_future);
-ctx.run();
-f.get(); // void
-```
-
-## deferred expressions
-
-Additionally, a deferred can be invoked with a function object that returns another deferred expression. E.g.:
-
-```cpp
-asio::io_context ctx;
-asio::async_operation auto post_op = asio::post(ctx, asio::deferred);
-asio::async_operation auto double_post_op =
- asio::post(asio::deferred([&]{return post_op;}));
-
-auto f = std::move(double_post_op)(asio::use_future);
-ctx.run();
-f.get(); // void
-```
-
-This now will call two posts subsequently.
-
-Not every deferred expression however is an async_operation, deferred provides multiple utilities.
-
-## `deferred.values`
-
-`deferred.values` is a deferred expression that just returns values, so that you can modify the completion signature.
-
-```cpp
-asio::io_context ctx;
-asio::async_operation auto post_int_op =
- asio::post(ctx,
- asio::deferred(
- []
- {
- return asio::deferred.values(42);
- }
- ));
-
-auto f = std::move(post_int_op)(asio::use_future);
-ctx.run();
-assert(f.get() == 42); // int
-```
-
-This already can be useful to modify completion signatures, similar to `asio::append` and `asio::prepend`.
-
-## `deferred.when`
-
-Next deferred provides a conditional, that takes two deferred expressions.
-
-```cpp
-auto def = asio::deferred.when(condition).then(def1).otherwise(def2);
-```
-
-This can be used for simple continuations with error handling.
-Let's say we want to read some memory from `socket1` and write to `socket2`.
-
-```cpp
-extern asio::ip::tcp::socket socket1, socket2;
-char buf[4096];
-
-auto forward_op =
- socket1.async_read_some(
- buf,
- asio::deferred(
- [&](system::error_code ec, std::size_t n)
- {
- return asio::deferred
- .when(!!ec) // complete with the error and `n`
- .then(asio::deferred.values(ec, n))
- .otherwise(
- asio::async_write(socket2,
- asio::buffer(buf, n),
- asio::deferred));
- }
- ));
-```
-
-## Multiple `deferred`s
-
-Since all the calls with `deferred` yield async_operations, we can combine more than two, just by invoking the resulting expression. Let's say we want to add a delay at the end of the operation above, we can simply add another deferred.
-
-```cpp
-extern asio::ip::tcp::socket socket1, socket2;
-extern asio::steady_timer delay;
-char buf[4096];
-
-auto forward_op =
- socket1.async_read_some(
- asio::buffer(buf),
- asio::deferred(
- [&](system::error_code ec, std::size_t n)
- {
- return asio::deferred
- .when(!!ec) // complete with the error and `n`
- .then(asio::deferred.values(ec, n))
- .otherwise(
- asio::async_write(socket2,
- asio::buffer(buf, n),
- asio::deferred));
- }
- ))
- (
- asio::deferred(
- [&](system::error_code ec, std::size_t n)
- {
- return asio::deferred
- .when(!!ec)
- .then(asio::deferred.values(ec, n))
- .otherwise(
- delay.async_wait(asio::append(asio::deferred, n))
- );
- }
- )
- );
-```
-
-This now gives us a simple composed operation with three steps.
-It also gets increasingly unreadable, which is why asio provides
-
-## `operator|`
-
-Instead of invoking the deferred expression multiple times, you can also just write this:
-
-```cpp
-extern asio::ip::tcp::socket socket1, socket2;
-extern asio::steady_timer delay;
-char buf[4096];
-
-auto forward_op =
- socket1.async_read_some(asio::buffer(buf), asio::deferred)
- | asio::deferred(
- [&](system::error_code ec, std::size_t n)
- {
- return asio::deferred
- .when(!!ec) // complete with the error and `n`
- .then(asio::deferred.values(ec, n))
- .otherwise(
- asio::async_write(socket2,
- asio::buffer(buf, n),
- asio::deferred));
- }
- )
- | asio::deferred(
- [&](system::error_code ec, std::size_t n)
- {
- return asio::deferred
- .when(!!ec)
- .then(asio::deferred.values(ec, n))
- .otherwise(
- delay.async_wait(asio::append(asio::deferred, n))
- );
- });
-```
-
-
-## Readable code
-
-It should be quite clear that the complexity can get out of hand rather quickly, which is why you should consider separating the continuation functions from the deferred chain.
-
-This can be achieved by using `append` to pass pointers to the
-io objects, like so:
-
-
-```cpp
-auto do_read(asio::ip::tcp::socket * socket1,
- asio::ip::tcp::socket * socket2,
- char * buf, std::size_t n)
- {
- return socket1->async_read_some(
- asio::buffer(buf, n),
- asio::append(asio::deferred, socket1, buf));
- };
-
-auto do_write(system::error_code ec, std::size_t n,
- asio::ip::tcp::socket * socket2, char * buf)
- {
- return asio::deferred
- .when(!!ec) // complete with the error and `n`
- .then(asio::deferred.values(ec, n))
- .otherwise(
- asio::async_write(*socket2,
- asio::buffer(buf, n),
- asio::deferred));
- };
-
-template
-auto forward_op(
- asio::ip::tcp::socket & socket1,
- asio::ip::tcp::socket & socket2,
- char (&buf)[Size])
-{
- return asio::deferred.values(
- &socket1, &socket2, &buf[0], Size)
- | asio::deferred(&do_read)
- | asio::deferred(&do_write);
-}
-```
-
-
-More examples can be found in the [asio repo](https://github.com/chriskohlhoff/asio/tree/master/asio/src/examples/cpp14/deferred).
\ No newline at end of file
diff --git a/_posts/2023-01-02-Asio201Timeouts.md b/_posts/2023-01-02-Asio201Timeouts.md
deleted file mode 100644
index 5f557187c..000000000
--- a/_posts/2023-01-02-Asio201Timeouts.md
+++ /dev/null
@@ -1,623 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: asio
-title: Asio 201 - timeouts, cancellation & custom tokens
-author-id: klemens
----
-
-Since asio added and beast implemented per-operation cancellation,
-the way timeouts can be implemented in asio code has changed significantly.
-
-In this article, we'll go from simple timeouts to building our own timeout completion token helper.
-
-# Cancellation
-
-A timeout is a defined interval after which a cancellation
-will be triggered, if an action didn't complete by then.
-
-Timeouts *can* be a way of handling runtime errors, but one should generally be prudent about their usage. Indiscriminate application
-of timeouts with intervals based on the programmer's feelings can
-lead to bad code and odd behavior.
-
-## Previous solutions
-
-Previous to per-operation cancellation, one could only cancel all operations on a given io-object. E.g.:
-
-```cpp
-extern asio::ip::tcp::socket sock;
-extern std::string read_buffer, write_buffer;
-
-asio::async_read(sock, asio::dynamic_buffer(read_buffer), asio::detached);
-asio::async_write(sock, asio::buffer(write_buffer), asio::detached);
-
-// cancel both the write and the read, by cancelling all outstanding operations
-sock.cancel();
-```
-
-Due to the popularity of timeouts, beast provides its own stream wrappers, `tcp_stream` & `ssl_stream` that (among other things) provide
-a timeout using this kind of cancellation based on internal timers.
-
-## Per operation cancellation
-
-Per operation cancellation is a much more fine-tuned model;
-instead of cancelling all outstanding operations on an io-object,
-it cancels particular ones.
-
-```cpp
-
-extern asio::ip::tcp::socket sock;
-extern std::string read_buffer, write_buffer;
-
-asio::cancellation_signal cancel_read, cancel_write;
-
-asio::async_read(sock, asio::dynamic_buffer(read_buffer), asio::bind_cancellation_slot(cancel_read.slot(), asio::detached));
-asio::async_write(sock, asio::buffer(write_buffer), asio::bind_cancellation_slot(cancel_write.slot(), asio::detached));
-
-// cancel only the read op with cancellation type terminal
-cancel_read.emit(asio::cancellation_type::terminal);
-```
-
-## Cancellation types
-
-The different kinds of cancellation are:
-
- - *terminal*:
-Requests cancellation where, following a successful cancellation, the only safe operations on the I/O object are closure or destruction.
-
- - *partial*:
-Requests cancellation where a successful cancellation may result in partial side effects or no side effects. Following cancellation, the I/O object is in a well-known state, and may be used for further operations.
-
- - *total*:
-Requests cancellation where a successful cancellation results in no apparent side effects. Following cancellation, the I/O object is in the same observable state as it was prior to the operation.
-
-The sender may combine multiple types with `operator|`; the receiver treats the cancellation as a signal it may ignore, and it should satisfy the lowest level of cancellation possible.
-
-## Full example
-
-To give an example of the cancellation types on a protocol level,
-consider the following function (written as a coroutine for simplicity):
-
-```cpp
-// read data from the stream and forward it to the parser
-// until one full value is read.
-// whatever is leftover goes into the `buf` to be used for the next value.
-template
-auto async_read_json(Stream & stream,
- json::stream_parser & parser,
- DynamicBuffer & buf /*< beast style buffer! */)
- -> asio::awaitable
-{
-
- // 0: Nothing happened
- while (!parser.done())
- {
- // 1: read the next chunk
- const std::size_t read_n =
- co_await stream.async_read_some(
- buf.prepare(4096), asio::use_awaitable);
- // 2: move it to the read buffer
- buf.commit(read_n);
- // 3: write it to the parser
- const auto wbuf = buf.cdata();
- const std::size_t writ_n = parser.write(static_cast(wbuf.data()), wbuf.size());
- // 4: remove parsed bytes from the buffer
- buf.consume(writ_n);
- }
-
- co_return parser.release();
-}
-```
-
-*terminal*: this means the data & the stream can only be closed. That is, if the algorithm receives a cancellation in step (1), it can just exit directly, because
-the cancellation indicates the caller doesn't care about the data anymore.
-
-*partial*: this means the operation might have read actual data, but can be resumed later on. If partial cancellation occurs we need to at least transfer the read data into the buffer; in this case however, they should also be sent to the parser,
-as the json might be complete and the next async_read_some call would prevent us from completing.
-
-*total*: Total cancellation means no side effects, i.e. nothing was read. This may happen on our first iteration through the loop, if async_read_some gets cancellation before a single byte has been written.
-
-With this in mind we can rewrite our coroutine to handle cancellation -
-note that `awaitable`s have an internal cancellation state.
-
-```cpp
-template
-auto async_read_json(Stream & stream,
- json::stream_parser & parser,
- DynamicBuffer & buf /*< beast style buffer! */)
- -> asio::awaitable
-{
- // by default awaitables only allow terminal cancellation
- // we'll enable all types here:
- co_await asio::this_coro::reset_cancellation_state(asio::enable_total_cancellation());
-
- while (!parser.done())
- {
- // check if we've been cancelled!
- asio::cancellation_state cs = co_await asio::this_coro::cancellation_state;
- if (cs.cancelled() != asio::cancellation_type::none)
- break;
- // capture ec, so nothing gets thrown
- const auto [ec, read_n] =
- co_await stream.async_read_some(
- buf.prepare(4096), asio::as_tuple(asio::use_awaitable));
- if (ec == asio::error::operation_aborted)
- {
- using c_t = asio::cancellation_type;
- //update the state
- cs = co_await asio::this_coro::cancellation_state;
- c_t c = cs.cancelled();
- // total means nothing happened,
- // terminal means the data doesn't matter
- if ((c & (c_t::total | c_t::terminal)) != c_t::none)
- throw system::system_error(ec);
- // partial means we need to finish the loop
- // so we just do nothing and do NOT reset the filter!
- }
- else if (ec) // indiscriminately throw everything else
- throw system::system_error(ec);
- else
- // reset it to partial after the first read;
- co_await asio::this_coro::reset_cancellation_state(
- asio::enable_partial_cancellation());
-
- buf.commit(read_n);
- const auto wbuf = buf.cdata();
- const std::size_t writ_n =
- parser.write(static_cast(wbuf.data()), wbuf.size());
- buf.consume(writ_n);
- }
-
- asio::cancellation_state cs = co_await asio::this_coro::cancellation_state;
- if (cs.cancelled() != asio::cancellation_type::none)
- throw system::system_error(asio::error::operation_aborted);
-
- co_return parser.release();
-}
-```
-
-The above example is complex because it is considering different kinds of cancellation
-and when they can be provided to the caller.
-
-# Timeouts
-
-Based on the previous discussion, we may now use a timer
-and connect it to a cancellation slot to provide a timeout.
-
-```cpp
-asio::awaitable do_read(
- asio::ip::tcp::socket &sock,
- std::chrono::seconds timeout = std::chrono::seconds(5)
-)
-{
- asio::steady_timer tim{co_await asio::this_coro::executor, timeout};
- asio::cancellation_signal cancel_read;
- std::string read_buffer;
-
- tim.async_wait(
- [&](system::error_code ec)
- {
- if (!ec) // timer completed without getting cancelled itself
- cancel_read.emit(asio::cancellation_type::all);
- });
-
- co_await asio::async_read(sock, asio::dynamic_buffer(read_buffer),
- asio::bind_cancellation_slot(cancel_read.slot(), asio::use_awaitable));
- tim.cancel();
-
- co_return read_buffer;
-}
-```
-
-There is a problem in the above code: any cancellation delivered to `do_read` gets ignored. That is, the `awaitable` itself is an async operation that can get cancelled.
-
-```cpp
-extern asio::ip::tcp::socket sock;
-asio::cancellation_signal dr_c;
-asio::co_spawn(sock.get_executor(), do_read(sock),
- asio::bind_cancellation_slot(dr_c.slot(), asio::detached));
-dr_c.emit(asio::cancellation_type::all); // < ignored!
-```
-
-In order to rectify this, we also need to forward the cancellation received by the `awaitable`:
-
-```cpp
-asio::awaitable do_read(
- asio::ip::tcp::socket &sock,
- std::chrono::seconds timeout = std::chrono::seconds(5)
-)
-{
- asio::steady_timer tim{co_await asio::this_coro::executor, timeout};
- asio::cancellation_signal cancel_read;
- asio::cancellation_slot sl =
- (co_await asio::this_coro::cancellation_state).slot();
-
- std::string read_buffer;
- sl.assign(
- [&](asio::cancellation_type ct)
- {
- // cancel the timer, we don't need it anymore
- tim.cancel();
- // forward the cancellation
- cancel_read.emit(ct);
- });
-
- // reset the signal when we're done
- // this is very important, the outer signal might fire after we're out of scope!
- struct scope_exit
- {
- asio::cancellation_slot sl;
- ~scope_exit() { if(sl.is_connected()) sl.clear();}
- } scope_exit_{sl};
-
- // regular timeout with a timer.
- tim.async_wait(
- [&](system::error_code ec)
- {
- if (!ec) // timer completed without getting cancelled itself
- cancel_read.emit(asio::cancellation_type::all);
- });
-
- // the actual op
- co_await asio::async_read(sock, asio::dynamic_buffer(read_buffer),
- asio::bind_cancellation_slot(cancel_read.slot(), asio::use_awaitable));
- tim.cancel();
-
- co_return read_buffer;
-}
-```
-
-This is getting a bit verbose, so that users might look for alternatives.
-
-## `parallel_group` / `operator||`
-
-Thus the easiest way to implement a timeout is with a `parallel_group`. You might have seen the `awaitable_operators` used like this:
-
-```cpp
-using namespace asio::experimental::awaitable_operators;
-
-extern asio::ip::tcp::socket sock;
-extern steady_timer tim;
-extern std::string read_buffer;
-
-co_await (
- asio::async_read(sock, asio::dynamic_buffer(read_buffer), asio::use_awaitable) || tim.async_wait(asio::use_awaitable));
-```
-
-The `operator||` runs two awaitables in parallel, waiting for one to finish. When the first completes it cancels the other one `terminal`ly.
-
-This gives us a timeout, that will always be terminal, and is implemented by means of [parallel_group](https://www.boost.org/doc/libs/master/doc/html/boost_asio/reference/experimental__parallel_group/async_wait.html), i.e. similar to this:
-
-```cpp
-co_await
- experimental::make_parallel_group(
- asio::async_read(sock, asio::dynamic_buffer(read_buffer), asio::deferred),
- tim.async_await(asio::deferred)
- ).async_wait(
- experimental::wait_for_one(),
- asio::use_awaitable
- );
-```
-
-This is fine for many simple solutions & examples,
-but it's a very blunt & not terribly efficient way to achieve only terminal cancellation.
-
-It is important to mention, that a per low level operation timeout might also not be the right approach altogether. On the one hand, it might not be required that a particular single operation (like connect) completes within a certain amount of time, but that a series of operations does so (like resolve + connect + handshake).
-
-This means choosing where to put timeouts is a task for careful engineering.
-
-## Watchdogs
-
-Another popular pattern is a watchdog,
-when the requirement is to assure continuous progress.
-That is, we want to make sure that a long running operation does not get stuck, but every so often does some successful work.
-Consider downloading a huge file; we can't really put a timeout on it, but we can check that it did download some bytes every few seconds.
-
-You would usually use this for complex & long running operations, but for our example, we'll just reuse the
-async_read_json function.
-
-```cpp
-
-template
-auto async_read_json(Stream & stream,
- json::stream_parser & parser,
- DynamicBuffer & buf, /*< beast style buffer! */
- watchdog & wuff)
- -> asio::awaitable
-{
- wuff.reset();
- while (!parser.done())
- {
- const std::size_t read_n =
- co_await stream.async_read_some(
- buf.prepare(4096), asio::use_awaitable);
- wuff.reset();
- buf.commit(read_n);
- const auto wbuf = buf.cdata();
- const std::size_t writ_n = parser.write(
- static_cast(wbuf.data()), wbuf.size());
-
- buf.consume(writ_n);
- }
-
- co_return parser.release();
-}
-```
-
-If the `.reset` function on the watchdog isn't called during the watchdog interval,
-it will cancel the operation.
-
-This watchdog can be as simple as this:
-
-```cpp
-struct watchdog
-{
- watchdog(asio::any_io_executor exec, std::chrono::milliseconds interval)
- : tim(exec, interval), interval(interval)
- {}
-
- asio::steady_timer tim;
- std::chrono::milliseconds interval;
- asio::cancellation_signal cancel;
- void reset()
- {
- tim.expires_after(interval);
- tim.async_wait(
- [this](system::error_code ec)
- {
- if (!ec)
- cancel.emit(asio::cancellation_type::terminal);
- });
- }
-};
-```
-
-And we can use it with our awaitable by a simple bind:
-
-```cpp
-extern asio::ip::tcp::socket sock;
-
-beast::flat_buffer buf;
-json::stream_parser parser;
-
-watchdog wuff{sock.get_executor(), std::chrono::milliseconds(5000)};
-asio::co_spawn(sock.get_executor(),
- async_read_json(sock, parser, buf, wuff),
- asio::bind_cancellation_slot(wuff.cancel.slot(), asio::detached)
- );
-```
-
-# A custom timeout token
-
-While writing your own completion tokens is a bit of a hassle,
-it may be worth the effort if an entire application is using it.
-
-Here, we will write a `timeout` utility that utilizes different timeouts
-to fire a sequence of all cancellation types. The idea is that we do not want to use terminal cancellation right away, as we might corrupt data unnecessarily with that.
-
-Instead we have three intervals. After the first, we try `total` cancellation;
-if that doesn't do anything, we wait the second interval and use `partial` cancellation.
-If nothing happens after that, we go for `terminal`.
-
-```cpp
-struct timeout_provider;
-
-// that's our completion token with the timeout attached
-template
-struct with_timeout
-{
- timeout_provider * provider;
- Token token;
-};
-
-// this is the timeout source
-struct timeout_provider
-{
- timeout_provider(
- asio::any_io_executor exec
- ) : tim{exec, std::chrono::steady_clock::time_point::max()} {}
-
- asio::steady_timer tim;
-
- std::chrono::milliseconds tt_total = std::chrono::milliseconds(2000);
- std::chrono::milliseconds tt_partial = std::chrono::milliseconds(3000);
- std::chrono::milliseconds tt_terminal = std::chrono::milliseconds(5000);
-
- asio::cancellation_slot parent;
- asio::cancellation_signal timeout;
-
- asio::cancellation_type last_fired{asio::cancellation_type::none};
-
- ~timeout_provider()
- {
- if (parent.is_connected())
- parent.clear();
- }
-
- // to use it
- template
- auto operator()(Token && token)
- {
- return with_timeout>{
- this, std::forward(token)
- };
- }
-
- // set up the timer and get ready to trigger
- void arm()
- {
- last_fired = asio::cancellation_type::none;
- tim.expires_after(tt_total);
- if (parent.is_connected())
- parent.assign([this](asio::cancellation_type ct){timeout.emit(ct);});
- tim.async_wait(
- [this](system::error_code ec)
- {
- if (!ec) fire_total();
- });
- }
-
- void fire_total()
- {
- timeout.emit(last_fired = asio::cancellation_type::total);
- tim.expires_after(tt_partial);
- tim.async_wait(
- [this](system::error_code ec)
- {
- if (!ec) fire_partial();
- });
- }
-
- void fire_partial()
- {
- timeout.emit(last_fired = asio::cancellation_type::partial);
- tim.expires_after(tt_terminal);
- tim.async_wait(
- [this](system::error_code ec)
- {
- if (!ec) fire_terminal();
- });
- }
-
- void fire_terminal()
- {
- timeout.emit(last_fired = asio::cancellation_type::terminal);
- }
-};
-```
-
-The plan is then to use this like so:
-
-```cpp
-asio::awaitable<std::string> do_read(
- asio::ip::tcp::socket &sock,
- timeout_provider & timeout)
-{
- std::string read_buffer;
- co_await asio::async_read(sock, asio::dynamic_buffer(read_buffer),
- timeout(asio::use_awaitable));
- co_return read_buffer;
-}
-```
-
-In order to do that we need to provide a custom async_initiate with a
-custom token. The reason we need a custom handler is so that lazy operations like use_awaitable and deferred still work.
-
-Before we jump into a rather long piece of code, let's recap how async initiation works.
-
-We pass a completion token to async_initiate, together with the initiation of our op (e.g. `async_initiate_read`).
-The completion token must have a specialization of `async_result` that will call `initiate` with its completion handler
-and return a result value. The handler is usually some internal type, that has associators (e.g. an associated allocator).
-For example, `use_awaitable` is a token, `awaitable` the return type of its initialization and some `detail` type its handler.
-
-In order for our timeout to work, we need to wrap the other completion token, and then intercept the call to the initiation
-to obtain the handler, and wrap it as well.
-
-```cpp
-// the completion handler
-// that's our completion token with the timeout attached
-template<typename Handler>
-struct with_timeout_binder
-{
- timeout_provider * provider;
- Handler handler;
-
- template
- void operator()(Args && ... args)
- {
- //cancel the time, we're done!
- provider->tim.cancel();
- std::move(handler)(std::forward<Args>(args)...);
- }
-};
-
-namespace boost::asio
-{
-
-// This is the class to specialize when implementing a completion token.
-template<typename Token, typename... Signatures>
-struct async_result<with_timeout<Token>, Signatures...>
-{
- using return_type = typename async_result<Token, Signatures...>::return_type;
-
- // this wrapper goes around the inner initiation, because we need to capture their cancellation slot
- template<typename Initiation>
- struct init_wrapper
- {
- Initiation initiation;
- timeout_provider * provider;
-
- // the forwards to the initiation and lets us access the actual handler.
- template
- void operator()(
- Handler && handler,
- Args && ... args)
- {
- auto sl = asio::get_associated_cancellation_slot(handler);
- if (sl.is_connected())
- provider->parent = sl;
- provider->arm();
- std::move(initiation)(
- with_timeout_binder<std::decay_t<Handler>>{
- provider,
- std::forward<Handler>(handler)
- }, std::forward<Args>(args)...);
- }
- };
-
- // the actual initiation
- template<typename Initiation, typename RawToken, typename... Args>
- static auto initiate(Initiation && init,
- RawToken && token,
- Args && ... args) -> return_type
- {
- return async_result<Token, Signatures...>::initiate(
- // here we wrap the initiation so we enable the above injection
- init_wrapper<std::decay_t<Initiation>>(std::forward<Initiation>(init), token.provider),
- std::move(token.token),
- std::forward<Args>(args)...);
- }
-};
-
-
-// forward the other associators, such as allocator & executor
-template <template <typename, typename> class Associator,
- typename T, typename DefaultCandidate>
-struct associator<Associator, with_timeout_binder<T>,
- DefaultCandidate>
-{
- typedef typename Associator<T, DefaultCandidate>::type type;
-
- static type get(const with_timeout_binder<T>& b,
- const DefaultCandidate& c = DefaultCandidate()) noexcept
- {
- return Associator<T, DefaultCandidate>::get(b.handler, c);
- }
-};
-
-// set the slot explicitly
-template<typename Handler, typename CancellationSlot1>
-struct associated_cancellation_slot<
- with_timeout_binder<Handler>,
- CancellationSlot1>
-{
- typedef asio::cancellation_slot type;
-
- static type get(const with_timeout_binder<Handler>& b,
- const CancellationSlot1& = CancellationSlot1()) noexcept
- {
- return b.provider->timeout.slot();
- }
-};
-
-}
-```
-
-The above code can be found in a working example [here](https://gcc.godbolt.org/z/ndxes7Gaf)
-
-While the above code is quite a handful, it does create a new completion token.
-It does however give us more fine-tuned control over timeouts in a very readable
-& fine-tuned way.
-
diff --git a/_posts/2023-05-08-Future-of-Boost.md b/_posts/2023-05-08-Future-of-Boost.md
deleted file mode 100644
index f25aef45b..000000000
--- a/_posts/2023-05-08-Future-of-Boost.md
+++ /dev/null
@@ -1,181 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: Boost
-author-id: vinnie
-title: The Future of Boost
----
-
-Greetings! I’m Vinnie Falco, Boost library author, C++ enthusiast, and the founder of The C++ Alliance, a 501(c)(3) non-profit. While some of you are enjoying the C++Now conference this week, I’d like to share some background on our organization and some history, outline a vision and goals for C++ and Boost, and solicit your feedback and support.
-
-# How It Started
-
-I took notice of the C++ Standards Committee (“WG21”) while I was writing Boost.Beast in 2016. Howard Hinnant, a co-worker at Ripple, taught me about writing papers and committee meetings. Beast used Boost.Asio (portable networking for C++) and I was and still am a huge fan of this network library. I learned that Asio was being proposed for standardization. There was even a “Networking TS” document: Asio was very close to becoming part of C++ officially! But the author Christopher Kohlhoff always seemed to not have the time to attend the meetings and push this proposal through.
-
-Something which should not surprise anyone is that I despise paying taxes. In 2017, I had an idea: create a charitable organization which I can donate pre-tax income to, and then I could hire Christopher Kohlhoff as a “staff engineer” to work full time on C++ standardization, and Boost things! I would find the very best C++ people who are already doing open source work, then hire them full-time so they could focus on their open source C++ work from home, instead of traveling to a boring job in order to make a living.
-
-# A Few Setbacks
-
-In 2018 I offered this opportunity to Chris and he surprisingly turned it down. He actually liked going into an office and interacting with customers and users. He explained that the evolution of Asio and his WG21 work is not bottlenecked by time. Instead, he prefers to “think deeply about things over a long period, and then write something.” Basically the opposite of my strategy, which is to write a bunch of code quickly and then throw out the bad parts.
-
-This is a setback but I am not so easily deterred so I offered the same opportunity to Peter Dimov, an engineer of immense talent whose libraries are legendary. He also declined, explaining that taking a salary would transform a hobby into an obligation, affecting the quality and enjoyment of the work.
-
-Now I’m thinking, well this is a disaster! We had the non-profit in operation officially since March of 2018 (the IRS approved us in September of 2019). We had the C++ language Slack workspace as of November of 2017, transitioned to a paid plan with full history. Our strategy shifted to focus on supporting the Boost Library Collection directly. We hired our first Staff Engineer, Marshall Clow, in April of 2018.
-
-Fast forward and today we have 11 staff members. We have a great CTO/DevOps genius Sam Darwin. And we have Louis Tatta, our CEO that keeps things running smoothly and helps get the most out of every dollar donated. At some point I’ll share a complete list of everything that The C++ Alliance has done since the beginning, but that is the subject of another missive. Today I would like to talk about a vision for Boost.
-
-# The Boost Library Collection
-
-Long-timers know Boost’s history but for those that don’t, Beman Dawes and Robert Klarer came up with the idea of a website offering curated, high quality C++ libraries in May of 1998. They described the “Formal Review,” a social technology where a group of peers would go over a proposed library at an agreed-upon time. They could interrogate the author about the library on the mailing list, and debate things. The outcome is a collection of posts where each reviewer summarizes their critique of the library, including whether or not to “accept” the library (sometimes with conditions). The founding document evokes a feeling of something big:
-
-[https://www.boost.org/users/proposal.pdf](https://www.boost.org/users/proposal.pdf)
-
-The collection was named “Boost” and received many great contributions. The authors and reviewers were active in the standardization committee. In December of 2005, Boost.Asio was added after being developed since 2003. In September of 2011 the C++11 standard was published, containing many library components modeled closely or identically to their Boost counterparts. In my opinion, Asio’s efforts at standardization were thwarted by the growth of politics; an inevitable consequence of the bureaucratic ISO structure.
-
-Boost launched its own conference called BoostCon in 2007 on the heels of its success. Speakers included Scott Meyers, Dave Abrahams, Eric Niebler, Howard Hinnant, and other juggernauts of C++. A new conference called CppCon was launched in 2014 and attracted even larger crowds, as it was focused on C++ in general.
-
-# Trouble Brewing
-
-With the release of C++11, there were now components in Boost which were duplicated in the Standard Library. The C++ committee became more popular and valuable owing to the success of C++11, made possible in part by years of lead-up from the talented Boost engineers. The conferences turned some people into the equivalent of pop stars, appearing as staple keynote speakers.
-
-Library writers discovered it was easier to get a proposal into the C++ standard than it was to get a library through the Formal Review process. They discovered that there was more glory to have a proposal accepted into the official C++ language, than to have their library accepted into Boost. And once their proposal became part of C++, they no longer had to “maintain their code” the way they would if their library got in Boost. A Formal Review evaluates the author somewhat in addition to the library. Because once a library is accepted, it must be maintained and evolved. When an author abandons their Boost library, the community is impoverished as the knowledge and expertise leaves with them. And someone else must take over the maintenance.
-
-In December of 2020, Beman Dawes passed away and Boost suffered a loss which can never be replaced. Beman had an enormous impact on not just the libraries but also C++. He was in WG21 from the very beginning, chaired LWG (the “Library Working Group”) for quite some time, and achieved a long history of open source contributions.
-
-Boost had other problems. Fewer libraries were being proposed, and it took longer to find a volunteer review manager. Mailing list volume declined steadily. At 160+ libraries, users complained that “Boost is too large.” They complained that “many of the libraries are outdated”, that “the documentation is of varying quality”, and that “Boost takes too long to compile.” They complained about the obscure build system and lack of cmake support. The archetype of “never Booster” appeared: individuals or corporations who ban the use of Boost entirely.
-
-Beman was the closest thing resembling a leader, despite Boost being a federation of authors with each having final word over their own library. Beman would solve problems, help with the direction of things, and even “beat the bushes” when it was time for a review by reaching out to his network of contacts and soliciting their participation. Without Beman, Boost lost its leader. Boost lost its Great Founder. And no one has since filled the role.
-
-# The C++ Alliance
-
-At this point, a vision for what our non-profit could do crystallized. We would help C++ in general by refreshing the foundations of Boost, restoring Boost’s prominence in the community, and helping Boost become a leader once again as innovators in C++. To do this, I'll share what we feel are the problems facing Boost, and ways to address some of them. Finally I'd like you to weigh in on all of this and help figure out what is important and what successful execution might look like.
-
-We believe Boost faces these obstacles, organized broadly by category:
-
-### Stagnation
- * There are fewer new libraries proposed.
- * Formal reviews get less participation.
- * Review managers are typically scarce now.
- * The mailing list volume is thinning; younger folks don’t use lists.
- * There is no second order effect: new libraries rarely use Boost.
-
-### Quality
- * Some libraries are unmaintained and create a negative user experience.
- * Users open issues, and no one replies to them.
- * Pull requests are submitted to abandoned repositories.
- * Scant financial resources for infrastructure or staff.
-
-### Documentation
- * The quality of documentation varies greatly across libraries.
- * The rendered pages and content of some documentation looks dated.
- * Some toolchains used are obscure and unmaintained.
-
-### Perception
- * Boost causes long compile times.
- * The libraries have too many interdependencies
- * Supporting old C++ versions is a weakness not a strength.
- * The duplication of std components is wasteful and causes friction.
- * The “Monolithic” distribution of Boost is obsolete.
-
-### Messaging
- * The website is outdated and never receives updates.
- * Boost’s value proposition is not clear (“why use boost?”)
- * There is no clear voice countering misconceptions and irrational phobias.
- * Users receive no guidance about the future, or what is maintained.
- * The libraries have no representation at conferences.
-
-Users have also weighed in with their thoughts on Boost:
-
-[https://www.reddit.com/r/cpp/comments/gfowpq/why_you_dont_use_boost/](https://www.reddit.com/r/cpp/comments/gfowpq/why_you_dont_use_boost/)
-
-# A Plan
-
-I love C++, supporting users, and the Boost author experience. I think these problems can be solved. But not by demanding that “everyone who maintains a Boost library must do X.” In Boost culture when you want something done you need to do it yourself, then convince the mailing list of the merits of your proposal.
-
-As a library author and contributor, I know that whatever I do will never rise to the same level as the original act of the creation of the Boost Library Collection. But I will be satisfied if I can stoke its fires and bring them back to a roar. To this end the resources of the non-profit are directed into projects we believe will positively affect Boost:
-
-# Website Renovation
-
-Our vision for an updated Boost website is clean and stylish, which speaks to a large and diverse audience. This site will have a design and content that effectively communicates the value proposition of using the Boost Library Collection: that you will write better C++ code, become more productive, and achieve greater success in your career or personal projects. Features will foster participation and revisits, with content updated regularly. The library presentation is elevated with a new visual design language that evokes distinction and appeal, and credits the authors, maintainers, and contributors that bring it to life.
-
-To achieve this vision, you have probably heard that we contracted an outside software firm to build something precisely tailored for our needs. We care too much about Boost to use an ill-fitted, off the shelf product. This website has a lot of software behind it (written in Python as part of a Django framework application) and like most software projects it is late and over budget. I’ll refrain from saying “it’ll be ready soon” and just post a link to the new site instead, hopefully in a few weeks.
-
-I have been personally involved in the design, presentation, and execution of the features of the website, most of which have been cut from the initial release in order to speed things along. The goal is to show the library collection in a way that highlights its strengths and speaks to a desire of every C++ programmer: to find the perfect library they can add as a dependency to help complete their next project.
-
-The Boost website and the site documentation can be illustrated by retaining a talented digital artist to produce custom assets that are unified in style, colors, and messaging, so that the entire site feels purposeful. This artist will also provide imagery used for our social media campaigns such as the announcements we make on Twitter which some of you might have already seen
-
-[https://twitter.com/Boost_Libraries](https://twitter.com/Boost_Libraries)
-
-I have strived to give every tweet an image to enhance the Boost brand.
-
-Recently an animated discussion on the mailing list took place about adding a forum which does not replace the mailing list but is integrated to work with it. Posts in the forum become posts to the mailing list, and vice versa. Users of the mailing list and users of the forum will have no idea they are interacting, even though they are. This can only be possible if we write the software ourselves, from the ground up, with exactly one constraint: the mailing list will continue to operate exactly as it does today, on an unmodified version of Mailman 3. The mailing list users stay happy, and we can attract new people who prefer a web-based interface.
-
-The C++ Alliance prioritizes its allocation of resources to ensure not only the website’s completion, but also dedicated staff for ongoing maintenance and improvement. The Boost website will rise over time to the same level of quality expected of every Boost library. Community members should feel free to open issues on the website repository with bugs or features, knowing that every issue will be looked at, triaged, and addressed appropriately.
-
-# Documentation Improvement
-
-Our vision for documentation is to ensure that every Boost library has the option to adopt a well-maintained toolchain that is easily deployed, produces high-quality output befitting the Boost brand, is itself well-documented and easy to use, and has behind it full-time staff working continuously to make improvements and provide technical support.
-
-After researching the domain extensively (by just asking Peter Dimov) we have discovered that the markdown format Asciidoc is a very popular format with a simple and well maintained toolchain. Several regularly active Boost authors have already switched their libraries to using Asciidoctor. The authors of the Asciidoctor tool are also the authors of “Antora,” a modular, multi-repository documentation site generator:
-
-[https://docs.antora.org/antora/latest/](https://docs.antora.org/antora/latest/)
-
-We have built a new, modern set of additional scripts capable of building the Boost release and documentation, including the capability of rendering “Antora-enabled Boost library repositories” using this Antora system. The results are beautiful and modern, and the Asciidoctor / Antora toolchain holds the promise of being popular and well-maintained for a long time. The use of Asciidoc or Antora is optional; this is just an additional choice.
-
-Peter Turcan is our full-time Senior Technical Writer who is modernizing the instructions for users, maintainers, contributors, and formal review participants. You can see Peter’s work along with the quality of Antora’s output here (note that the user-interface is stock and will be restyled soon):
-
-[https://docs.cppalliance.org/](https://docs.cppalliance.org/)
-
-The website above has a new full-text search feature (try it!). We are investing in a search experience which includes the site docs, library docs, library references, and even the public header files. We are also investing in the deployment of a large language model (ChatGPT-style AI) trained in Boost and C++ specifics to answer questions for users. We have a new talented and eager staff engineer working full-time exclusively on this, and I don’t want to steal his thunder so I will let him explain further soon.
-
-Some Boost libraries currently generate their documentation reference pages using Doxygen combined with other obscure tools such as xsltproc or Saxon-HE to render into Boost Quickbook, an obsolete form of markdown which only we use. This Quickbook is rendered into BoostBook, which is a flavor of DocBook. The BoostBook is converted into HTML by a DocBook renderer. This rapidly obsolescing toolchain is painful to work with and is a form of technical debt which costs us.
-
-I have begun work on a new command-line tool called MrDox (“mister docs”) which uses the unstable clang libtooling API to extract the documentation comments and declarations from C++ programs, and turn them into beautiful Asciidoc reference pages. You can see that work here:
-
-[https://github.com/cppalliance/mrdox](https://github.com/cppalliance/mrdox)
-
-The core principles of the design of MrDox is to always understand the very latest C++ constructs and extract them with high fidelity. For example it recognizes conditional noexcept, constexpr, deduction guides, all attributes, and many other things that other documentation toolchains cannot fathom. In a nutshell I intend to bring the same level of Boost quality to the documentation toolchain that Boost has brought to the C++ libraries themselves.
-
-MrDox intends to completely replace Doxygen, xsltproc, Saxon-HE, Quickbook, Boostbook, and Docbook, as the only requirement to render its results is to run the Asciidoctor tool, which has no other dependencies. This toolchain offers modernization and simplification for anyone who opts-in to it, which reduces long-term risks and improves results. This unfortunately delays the development of my other libraries, but enhancements in the documentation toolchain are a force multiplier; many Boost libraries can benefit.
-
-# Continuous Integration
-
-Our vision for continuous integration is to bring the most talented individuals together and combine that with state of the art technology and resources to ensure that every library has at its disposal, access to robust cloud services for continuous integration. These services are the lifeblood of maintaining and communicating the quality of a library. We aim to provide dedicated staff and technical support to fortify Boost in the ever-shifting landscape of free CI services for open source projects.
-
-The infrastructures providing our continuous integration services are the lifeblood of maintaining the high quality of the Boost collection. Library authors test against many versions of C++ and many different compiler versions. And we have many libraries; over 160 of them which all compete for the finite public resources offered by GitHub through GHA, through Azure Pipelines, or Appveyor.
-
-When Travis discontinued its free service, our CTO Sam Darwin deployed Drone ([https://www.drone.io/](https://www.drone.io/)) instances and offered every Boost library a pull request which compiles and runs their tests on our new infrastructure. Although this service is still active and offered today, we are not content to leave it at that. CI services are volatile over time. Some come, some go, and some become overloaded which is the current situation with the public GitHub Actions runners during peak times. The Boost organization GitHub account has over one hundred and sixty libraries each submitting sometimes enormous numbers of jobs which take multiple hours to complete.
-
-Although the GHA environment resources are subjected to recurring oversubscription, we feel that it offers the best framework for composable actions and flexibility. Sam is exploring the possibility of having self-hosted C++ Alliance runners dedicated only to Boost jobs during peak times. Ensuring high availability of CI resources is an ongoing project for us, and we are always evaluating existing and new solutions to provide the best-of-class choices for libraries.
-
-# Library Enhancements
-
-Our vision for the libraries themselves is to preserve unchanged the amazing social technologies invented by the Boost founders which include the Formal Review process, the Release Schedule, the mailing list discussions, and the federated library ownership model. We want to ensure that no library is unmaintained and that every opened issue receives a response. We want the community to respect and admire the formal review process and participate with eagerness not only as reviewers but also as volunteer review managers and participants in the sometimes-heated list discussions. Library membership in the Boost library collection should be seen as the highest level of honor and recognition of effort.
-
-The C++ Alliance has ongoing direct investments in improving existing Boost libraries and writing new ones to be submitted for formal review. Many folks are already aware of the optimization efforts being applied to the Boost.Unordered library, whose plan was written up by Peter Dimov. Joaquín M López Muñoz is providing his mathematical expertise and container experience, while Christian Mazakas (one of our full-time staff engineers) is writing the implementation, tests, and documentation according to specification.
-
-People following Boost.Math might recognize Matt Borland as a regular contributor. He has joined us as a staff engineer and is currently working on a new library to be proposed for Boost: charconv, which is a C++11 port of the eponymous C++17 feature. This library will help libraries and users who may not have access to C++17 enjoy the same features through Boost instead.
-
-# Messaging and Direction
-
-Our vision for Boost includes clear messaging to inform the public on the status of the libraries, the challenges we are facing, and what our future direction might be. We believe in robust two-way communication between library authors and maintainers, and the stakeholders which are largely the people and companies which use the Boost libraries. We believe in having a social media presence that helps convey the prestige and status that comes with the quality Boost libraries offer.
-
-Currently we have only anecdotal evidence of Boost’s adoption (or lack thereof) in various companies and projects. We only hear from the people who complain or open issues, or post to the mailing list. We do not have a concise list of companies using Boost, when new companies adopt Boost, or when companies stop using Boost. We do not have feedback from stakeholders about which Boost libraries they rely on the most, what they would like to see in future versions, or in some cases even if they are having problems with a library or its documentation.
-
-The decentralized model of Boost library development works great for the problems it tries to solve but offers no overall directional guidance for Boost. Today the C++ language is facing unprecedented challenges: the popularity of Rust, the demands for “memory safety”, the rise of Artificial Intelligence capable of writing software independently, and possibility that the bureaucratic structure of WG21 renders it incapable of meeting these challenges in a lively or effective manner.
-
-We believe that Boost can offer the greatest value by focusing in the areas where C++ is strong and without meaningful competition. These include space exploration, game development, high-performance computing, embedded systems, the Internet of Things, robotics and industrial process control, financial services, computer vision and graphics, scientific simulation, and more.
-
-Furthermore the stunning and continued lack of networking in the standard library creates an opportunity for Boost to offer full-stack solutions in areas that speak to the strengths of C++. This is made possible because Boost already offers portable networking through Asio, HTTP and Websocket through Beast, excellent JSON parsing and serialization tailored for network programs, URLs, and more recently a Redis client (Boost.Aedis) and even a MySQL / MariaDB client. We intend to sponsor the development of non-Boost, open source applications and services that target specific underserved markets that would benefit from C++ solutions which use the excellent libraries that exist in Boost.
-
-# Where Do You Fit In?
-
-Our vision, our activity, and our deployed solutions are all “opt-in.” No one controls Boost or its libraries. Change is only possible with consensus of the folks that matter: authors, maintainers, and release managers. If Robert Ramey wants to keep his documentation in hand-written HTML that is entirely his choice; no one dictates what libraries do. We can only offer choices, and hope they will be seen as valuable.
-
-This has been a long read, and I appreciate your investment of time. How do you feel about this vision? What would you change, or add, and what needs work? We welcome feedback, and value the volunteers who share our vision and help us execute it.
-
-I invite you to stay tuned for more great news, coming soon!
-
-Respectfully Yours
-
-Vinnie Falco
diff --git a/_posts/2023-05-09-New-Website.md b/_posts/2023-05-09-New-Website.md
deleted file mode 100644
index cf54f19e8..000000000
--- a/_posts/2023-05-09-New-Website.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: Boost
-author-id: vinnie
-title: New Website
----
-
-Hey, it’s Vinnie here again! I have some very exciting news to share with you. The renovated website for Boost that we’ve been working on for far too long is now going into its Public Beta phase! Feel free to poke around and kick the tires but keep in mind this is a piece of software and it is still under development. Some parts are missing, incomplete, or buggy. Without further ado:
-
-[https://boost.revsys.dev](https://boost.revsys.dev)
-
-This public beta will extend for at least 10 weeks as we finish the last remaining features, and put a few more rounds of polish on the artwork, visual styling, and user interface. After that if you like what you see then we will reset the database, move the repositories and the site to their new homes, and then deploy it for real! If you have suggestions or wish to report problems please feel welcome to open an issue here:
-
-[https://github.com/cppalliance/boost.org/issues](https://github.com/cppalliance/boost.org/issues)
-
-This is a Django site written and maintained in Python by a great group of folks from [https://www.revsys.com/](https://www.revsys.com/). The project is coordinated by Frank Wiles ([https://www.revsys.com/about/bio/frankwiles](https://www.revsys.com/about/bio/frankwiles)), who shares the same deep commitment to open source that we Boost people do. We chose Django because it is well understood and supported, and because Python composes well with ease of maintenance. This technology stack allows us to execute on our future plans with confidence.
-
-Some cool things about this website include:
-
-* Log in with GitHub or Google
-* Light and Dark themes with selector
-* Asciidoc markdown for dynamic content
-* Antora content sources for site documentation
-* Data-driven dynamic pages for libraries and releases
-* Integrated control panel for producing Boost releases
-* Full-text search across the documentation and library headers
-* Professionally designed logo to evoke identity and instant recognition
-
-The layout of the site is carefully thought out to make sure that information can be found quickly. Fundamental topics each get a dedicated word in the top level navigation, visible on every page including the site and library documentation. Most of the information which is not library-specific is warehoused in “site docs” which is a collection of individual Antora content sources each authored in Asciidoc. Anyone can update the information on the website simply by contributing to the site-docs via pull request:
-
-[https://github.com/cppalliance/site-docs](https://github.com/cppalliance/site-docs)
-
-Antora and Asciidoc are potent technologies that offer Boost plenty of room to grow to make our site documentation and library documentation more effective for users. While the initial release of the website after the beta will only have the basic functionality, we have many future plans. Some of these you have heard about already and some are new:
-
-* A web-based forum which interacts seamlessly with the mailing list
-* Polls and surveys for measuring the sentiment of Boost users and stakeholders
-* News section for aggregating off-site links, blog posts, and release progress messages
-* Integrated Review process. Outcomes go in the database and become dynamic content.
-* User Profiles record and show activity such as review participation or library submission
-
-We’re all very excited and proud of the work that went into this thing and hope that it really fulfills the positive impact that we intended.
-
-Thanks!
-
-Vinnie
diff --git a/_posts/2023-07-27-Coroutines.md b/_posts/2023-07-27-Coroutines.md
deleted file mode 100644
index d1e85e73e..000000000
--- a/_posts/2023-07-27-Coroutines.md
+++ /dev/null
@@ -1,288 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: asio
-title: Coroutine 101 - A simple serializer
-author-id: klemens
----
-
-This is the first post in a series explaining C++20 coroutines by example.
-
-# Goal
-
-The goal for this article is to create a simple non-recursive stream serializer, that can be used like this:
-
-```cpp
-stream::serializer serialize_ints(std::vector<int> data)
-{
- for (auto idx = 0u; idx < data.size(); idx++)
- {
- if (idx != 0)
- co_yield ',';
- co_yield std::to_string(data[idx]);
- }
-}
-
-int main(int argc, char *argv[])
-{
- auto s = serialize_ints({1,2,3,4,5,6,7,8,9,10});
- std::string buffer;
- buffer.resize(10);
-
- using std::operator""sv;
-
- assert(s.read_some(buffer) == "1,2,3,4,5,"sv);
- assert(s.read_some(buffer) == "6,7,8,9,10"sv);
-
- return 0;
-}
-```
-
-That is we want a serializer coroutine that can `co_yield` strings and chars,
-and can get consumed by a simple sync interface.
-
-
-# Serializer
-
-The serializer interface itself is very simple:
-
-```cpp
-struct serializer
-{
- // The promise_type tells how to make this a coroutine.
- using promise_type = detail::serializer_promise;
-
- bool done() const;
- std::string_view read_some(std::span buffer);
-
- private:
- // The data passed into read_some
- std::span<char> buffer_;
- // The amount written to the buffer
- std::size_t written_;
- // The bytes that couldn't be written bc the buffer was full.
- std::string_view remaining_;
-
- friend detail::serializer_promise;
- // The unique_handle is essentially a unique_ptr of a std::coroutine_handle (which doesn't free)
- detail::unique_handle impl_;
-};
-```
-
-Next let's quickly define the done & read_some functions:
-
-```cpp
-
-bool serializer::done() const
-{
- // check if we have a coroutine attached & it's not done
- // and that we don't have data remaining from a previously full buffer
- return (!impl_ || impl_.done()) && remaining_.empty();
-}
-
-std::string_view serializer::read_some(std::span<char> buffer)
-{
- // consume data left from a previous co_yield.
- auto n = (std::min)(remaining_.size(), buffer.size());
- std::copy_n(remaining_.begin(), n, buffer.begin());
- written_ = n;
- remaining_.remove_prefix(n);
-
-
- buffer_ = buffer;
- // if the coroutine is still active & we have buffer space left, resume it
- if (!done() && (buffer_.size() != written_))
- {
- // tell the coroutine promise where to write the data
- impl_.promise().ser = this;
- // resume the coroutine to yield more data
- impl_.resume();
- }
-
- return {buffer.data(), written_};
-}
-```
-
-The `resume` function will resume the serializer coroutine until it
-either yields or returns.
-
-# Awaitables
-
-Before we get into the promise itself, we need to establish what awaitables are.
-
-An awaitable is a type with three functions:
-
-```cpp
-struct my_awaitable
-{
- bool await_ready();
- void await_suspend(std::coroutine_handle);
- T await_resume();
-};
-```
-When a coroutine awaits an awaitable, it will call `await_ready` first.
-If it returns `true` the coroutine does not need to suspend and will call `await_resume` to get the await result (always void in this example).
-If `await_ready` returns false, the coroutine suspends and `await_suspend` will be called with the coroutine's handle passed to it.
-Once the coroutine gets resumed (by calling `resume()` on the handle)
-it will call `await_resume` to get the result and return the value.
-
-The main point is that `await_ready` can be used to avoid suspension
-of the coroutine, which can make awaiting something a noop.
-
-The standard provides two awaitables:
-
- - std::suspend_never
- - std::suspend_always
-
-`suspend_never` will do nothing (`await_ready` returns true),
-while `suspend_always` will just suspend the coroutine (`await_ready` returns false).
-
-# Serializer Promise
-
-With that cleared up, let's look at the promise:
-
-```cpp
-struct serializer_promise
-{
- std::suspend_always initial_suspend() noexcept {return {};}
- std::suspend_always final_suspend () noexcept {return {};}
-
- serializer get_return_object();
-
- void return_void() {}
-
- void unhandled_exception() { throw; }
-
- struct conditional_suspend
- {
- bool written;
- bool await_ready()
- {
- [[likely]]
- if (written)
- return true;
- else
- return false;
- }
- void await_suspend(std::coroutine_handle) {}
- void await_resume() {}
- };
-
- conditional_suspend yield_value(const char & c);
- conditional_suspend yield_value(std::string_view c);
-
- serializer * ser;
-};
-```
-
-The next five functions are mandatory for any coroutine
-
-## initial_suspend
-
-The `initial_suspend` function gets called when the coroutine is created (i.e. when `serialize_ints` is called) and it's result
-awaited.
-In this case we want the coroutine to be lazy, i.e. do nothing until `read_some` resumes it. Therefore we return `std::suspend_always`.
-
-## final_suspend
-
-Once the coroutine is past the `co_return` it will call final_suspend
-and await the result. This can be used for continuations and cleanup.
-In our case, the `serializer` object holds the coroutine handle and
-will clean it up, so we also call `std::suspend_always`.
-
-## get_return_object
-
-The `get_return_object` function gets called to create the
-handle of the coroutine, in our case a `serializer` object.
-
-The implementation is pretty straight forward:
-
-```cpp
-serializer serializer_promise::get_return_object()
-{
- serializer s;
- s.impl_ = detail::unique_handle::from_promise(*this);
- return s;
-}
-```
-
-## return_void
-
-A coroutine either defines `return_void` if there is no return value
-or `return_value` if there is.
-Since we check the completion through the handle, we don't need to do anything here.
-
-## unhandled_exception
-
-If the coroutine exits with an exception it can be intercepted here.
-We just rethrow it, so the caller to .resume will receive it.
-That is it will be thrown from `serializer.read_some`.
-
-## conditional_suspend
-
-The `conditional_suspend` is an awaitable that lets you dynamically choose between `std::suspend_never` and `std::suspend_always`.
-
-We use this so we only suspend if the buffer is full and we tell
-the compiler to optimize for this case using `[[likely]]`.
-
-## yield_value
-
-The `yield_value` functions are needed when `co_yield` should be possible within the coroutine function.
-
-The function will get called with one value and
-it's return value will be awaited. This way we can conditionally suspend if the buffer is full. In our coroutine,
-we can yield a string_view and a single char.
-
-
-```cpp
-auto serializer_promise::yield_value(const char & c) -> conditional_suspend
-{
- // we got room in the buffer, just write the data
- if (ser->written_ < ser->buffer_.size())
- {
- ser->buffer_[ser->written_] = c;
- ser->written_++;
- // don't suspend, we sent the data.
- return conditional_suspend{true};
- }
- else // the buffer is full. store it as remaining
- {
- ser->remaining_ = {&c, 1u};
- return conditional_suspend{false};
- }
-}
-
-auto serializer_promise::yield_value(std::string_view c) -> conditional_suspend
-{
-
- if (ser->written_ < ser->buffer_.size())
- {
- // write as many bytes to the buffer as you can.
- auto n = (std::min)(c.size(), (ser->buffer_.size() - ser->written_));
- std::copy_n(c.begin(), n, ser->buffer_.begin() + ser->written_);
- ser->written_ += n;
-
- c.remove_prefix(n);
- if (!c.empty())
- ser->remaining_ = c;
-
- // suspend if we couldn't write all of the data
- return conditional_suspend{c.empty()};
- }
- else
- {
- // no space remaining, suspend
- ser->remaining_ = c;
- return conditional_suspend{false};
- }
-
-}
-```
-
-## Conclusion
-
-And with that we can write a stream serializer without confusing syntax.
-
-In the next article, we'll make the serializer recursive, i.e.
-allow one serializer to await another one.
-
diff --git a/_posts/2023-09-22-Words-Matter-An-Apology.md b/_posts/2023-09-22-Words-Matter-An-Apology.md
deleted file mode 100644
index e884c39fa..000000000
--- a/_posts/2023-09-22-Words-Matter-An-Apology.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: vinnie
-author-id: vinnie
-title: "Words Matter: An Apology"
----
-To: All
-
-I am writing to you today to convey challenges I have faced regarding my interactions within our community, including Boost spaces, Standard C++ Committee meetings, and The C++ Language Slack Workspace. This year, I was diagnosed as neurodiverse by clinical experts. While this offers insights into the unique ways in which I process information and interact with others, it does not excuse the impact my comments may have had on those around me. My blunt, insensitive, and strongly expressed opinions have led to misunderstandings and hurt feelings.
-
-These events have cast a shadow upon my reputation, causing headwinds in achieving bigger goals; an association with Vinnie Falco is sometimes perceived negatively. I can’t change the past, but I can do things differently going forward. To achieve this I am holding myself accountable for what has transpired, and investing time and energy in my personal development to bring the best possible version of myself to public spaces.
-
-I can’t do this alone; therefore I am asking those who harbor sharp feelings, if you can find it within you to forgive me. My door is always open for one on one discussions for anyone who has questions or comments about this or any other topic.
-
-Sincerely,
-
-Vinnie Falco
diff --git a/_posts/2023-10-19-BradenCppCon2023TripReport.md b/_posts/2023-10-19-BradenCppCon2023TripReport.md
deleted file mode 100644
index 8ce4386e8..000000000
--- a/_posts/2023-10-19-BradenCppCon2023TripReport.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: braden
-title: CppCon 2023 Trip Report
-author-id: braden
----
-
-CppCon 2023 was my first time attending this conference in person (instead of in my dreams), and it was everything I hoped for and more. I have spent many hours watching conference talks on YouTube, and I'm sure most of them have been from CppCon. For a professional C++ developer, it's an experience like no other. Here I'll outline my experience throughout the week.
-
-# Talks I Loved
-
-There are many talks happening at CppCon. Most of the time there are 4 simultaneous talks, which I've been told is less than previous years where they've had 5 at once. I wanted to attend them all, so I had to make some tough choices. I'll watch the rest of them on YouTube as they come out. Here I'll outline 3 of the talks that had an impact on me.
-
-## "Advancing cppfront with Modern C++: Refining the Implementation of is, as, and UFCS"
-
-Filip Sajdak gave this talk all about how he reimplemented cppfront's "is" and "as" operators. From my memory, this talk focused mainly on properly using concepts and function overload sets for implementing generic functionality. I loved it. It scratched that specific part of my brain that craves perfection in generic code. It was beautiful to see how concept subsumption allowed the implementation to be much more readable than it used to be. I especially liked the trick of using a lambda to pass a concept as a template parameter, which isn't possible in the language right now. I recommend watching once it's available.
-
-## "C++ Modules: Getting Started Today"
-
-This talk from Andreas Weis helped to continue fueling my interest in C++ modules. It was a nice breakdown of how modules work in terms of the build process and why modules will be a benefit to all of our code bases. Even more importantly, the talk went into detail about the various types of module files and how to use them today. I see this talk as a great resource to help people dive into the world of modules, and I can't wait to share it with my coworkers to make modules less scary. Very few of us are out here writing new code from scratch, we all need to manage older code while progressing towards the future. It's well worth watching once it's available.
-
-## "Coping With Other People's Code"
-
-Speaking of old code, I absolutely loved Laura Savino's talk. Thankfully this talk was a plenary, so I didn't need to make any hard choices and skip anything else. I enjoyed how Laura intertwined the stories of legacy code with the stories of home improvement. Sometimes, a $25 bandaid fix that saves you $5000 in the future is exactly the type of fix you need. We shouldn't feel ashamed that it's not the most "correct" fix, we should instead feel proud that we're able to save the $5000. This also applies to legacy code and "letting things go", instead of only settling for the most perfect solution. Sometimes there are bigger fish to fry.
-
-I liked Laura's focus on the more human aspects of software development. Something I often say is, "we don't work *with* code, we work *on* code *with* people", and I felt this same idea in the talk. There can be more important things than work, like spending time with family. More specifically, it was refreshing to hear Laura talk about fathers taking time off work and spending time with their children. I think there's nothing more important, and I'm glad to hear this same sentiment shared in a plenary talk. This one is already [available on YouTube](https://youtu.be/qyz6sOVON68) as I'm writing this, and I highly recommend it.
-
-# My Lightning Talk
-
-The lightning talks, if you haven't heard of them, are rapid-fire 5 minute talks. They'll be released on YouTube as individual videos, but in real life they were back-to-back, on 2 of the nights.
-
-I gave a lightning talk! It was one of the most exciting parts of the conference, standing up in front of a few hundred people (I assume) and giving a talk on something I'm passionate about.
-
-I called it, "Help! My expression template type names are too long!"
-
-My talk was about a cool trick I found that shortens the names of your types in expression template code. I've been writing a lot of expression template code over the past year and a half, and this can help me reduce my compile times, since the compiler doesn't need to copy around these massive type names internally. Unfortunately, as of the time of me writing this, I have only figured out how to get this trick to work in MSVC. It's pretty easy to do type name shortening/erasure in any of the compilers, but only for a specific type at a time, not for a generic type that comes from expression templates and overloaded operators. I hope I can figure out how to do something similar in the other big compilers as well.
-
-This talk was especially exhilarating because I submitted it before I found the trick. On the Monday night of CppCon, I had this idea for a way to shorten my type names, and I didn't know if it worked. Armed with ignorance, I submitted my talk and then started panicking to try and make it work. I made some new connections with people during the next few days while discussing this problem. There's nothing like a good "nerd snipe" to bring us together.
-
-If you have the opportunity to give a talk and you're on the fence about whether you should, just do it. It's a great opportunity to share something you're passionate about. I'm happy I did it!
-
-# The Hallway Track
-
-This is a funny term I heard while at CppCon. The conversation went something like this.
-
-Me: "The talks are almost starting. Which one are you going to?"
-
-Other: "I'm not sure. Right now, the hallway track is looking pretty appealing."
-
-Me: "Hallway track?"
-
-Other: "Yeah I met so-and-so earlier today and I wanted to keep talking to them, so we'll see."
-
-Then it dawned on me what this person was talking about. They were referring to having conversations with other people, and cheekily calling it the "hallway track". I like that term, so I'm going to keep using it.
-
-To me this is the most important part. All the talks can be watched later on YouTube. It's not the same, you don't get to feel the energy of the room and look around to see everyone else equally as engaged, but you still receive the talk. The real importance of CppCon is meeting other professionals in your field with the same passions. Forging connections. It's the biggest part of CppCon that's irreplaceable online.
-
-It was nice to have face-to-face conversations about expression templates with people who know a lot about the subject. Or to have conversations about the pedantics of such-and-such language construct, or this build system, or that compiler. This isn't something I get to do every day. I like feeling a sense of broader community of being a C++ developer and enthusiast. Yes I already knew that this community exists, and yes I interact with people online, but doing it in person was completely different. The sense of community is much more immediate and tangible.
-
-Not to mention the strange feeling of being face-to-face with people who are well known in the C++ community. I was sitting at a talk, and I suddenly realized I was sitting directly behind Bjarne Stroustrup himself. Another time, I was standing in the hallway talking with a few people, and then Jason Turner walked up and joined the conversation. Now, I don't think idolizing people is healthy in general, but it's definitely a shock to see these people in person who I've only ever watched through a screen.
-
-# Summary
-
-The talks were amazing, especially giving a talk of my own. Even more importantly I made some new connections and friends. Nothing beats meeting people face-to-face, it makes the sense of community feel more visceral.
-
-This whole experience has given me a sense of belonging in the C++ world. After CppCon 2023, I feel inspired to start a local meetup to build more community. I'm thankful that I got the opportunity to attend, and I look forward to strengthening the connections I made.
-
-![CppCon 2023 Opening](/images/posts/braden/2023-10-19-CppCon2023Opening.jpg)
diff --git a/_posts/2023-10-25-SamsQ3Update.md b/_posts/2023-10-25-SamsQ3Update.md
deleted file mode 100644
index 7075dedb8..000000000
--- a/_posts/2023-10-25-SamsQ3Update.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: sam
-title: Sam's Q3 Update
-author-id: sam
----
-
-Here's an overview of projects I have been working on the last few months.
-
-Github Actions self-hosted runners: based on https://github.com/philips-labs/terraform-aws-github-runner/ - Set up a second functional staging environment. Refactor code to work in both environments. Logic improvements. Sent doc updates upstream in PR. Also a PR to fix launching server instances. Added terraform in boostorg/beast, unordered, url. Improve AMI image builds. Rebuild with auth enabled, more disk space, packages. Debug build-tools installation issue on Windows 2022.
-
-Develop self-hosted runners admin server at gha.cpp.al: https://github.com/cppalliance/github-runner-admin. Has the capability to switch to self-hosted runners based on # of jobs as a decision algorithm. Implement prometheus/grafana data collection and monitoring of tagr system, including number of queued jobs, number of active runners launched, account billing status. Query github api to fetch statistics.
-
-Boostorg website cppalliance/temp-site: send Frank info on jfrog analytics. Add mailgun keys. Run the import releases scripts. Add variables, tokens, in kube files. Add domains to ingress. Implement social auth on prod websites. Debugging profile photo upload. Setting up backups for db1 and staging-db1 machines. Both snapshot backups and dumps to cloud storage. Deploy admin-server.boost.cpp.al server, for ansible and admin tasks related to website. Test multiple ways to use certificates with ingress. Report db connection leak, Frank fixed. Enabled versioning on S3 buckets. Tested restore procedure on directory in S3 bucket with versions enabled. Set up queries in GCP Cloud Logging, which discovers certain backend warnings/errors. Deploy site on preview.boost.org domain. Submitted and merged redis memorystore PR. Submitted and merged string quoting PR. Discuss boost-beta releases with Lacey.
-
-Mailman servers project: Modify ansible role to be able to only provision mailman-core, rather than full stack. Include database, postfix, nginx, in the role. Posting to mm3 mailing list about results. Deploy multiple production and staging mailman3 core servers. Future: should re-configure/replace those servers with boost.org domain instead of cppalliance.org domain. GKE networking topic, connection between core and web. Add feature in temp-site, hostAliases.
-
-Install and set up mrdox.com website on vm. Resize disk.
-
-Create an AWS account for Anarthal to work on BoostTech project. Sent PR to Zajo, switch drone to newer macs.
-
-Release tools. New feature in boostorg/release-tools to upload develop/master docs from the archives to s3 so the new website can host. Review and discuss release-tools with Alan. Add feature in publish_releases.py to upload to S3. Lint code in boostorg/release-tools. rebuild, upload 2204 image to dockerhub for release-tools, uses python-is-python3 package. Refactor build_docs CI, more efficiently display results. Debugging specific boost libraries with build_docs.
-
-old boost.org website: Assist Marshall with boost-tasks script. Review update-super-project.
-
-Fix JSON benchmarks. Discuss strategies with Dmitry.
-
-Drone machine OS upgrade.
diff --git a/_posts/2023-10-27-AlanQ3Update.md b/_posts/2023-10-27-AlanQ3Update.md
deleted file mode 100644
index f751780aa..000000000
--- a/_posts/2023-10-27-AlanQ3Update.md
+++ /dev/null
@@ -1,195 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: alan
-title: Alan's Q3 Update
-author-id: alan
----
-
-# Summary
-
-- [MrDocs](#mrdocs)
- - [Handlebars](#handlebars)
- - [The DOM](#the-dom)
- - [Javascript Bindings](#javascript-bindings)
- - [Unit tests](#unit-tests)
-- [Boost Website](#boost-website)
-- [Boost Release Tools](#boost-release-tools)
-- [Boost Libraries](#boost-libraries)
-- [C++ Github Actions](#c-github-actions)
-- [Gray-box local search](#gray-box-local-search)
-
-## MrDocs
-
-Over the last quarter, we have been working intensely on [MrDocs](https://github.com/cppalliance/mrdocs), a documentation generator for C++ projects. I've been overseeing and reviewing all the work done by the other contributors in the project. I've also been responsible for:
-
-- setting up and maintaining CI for the project;
-- MrDocs and LLVM release binaries;
-- build scripts;
-- setting up and integrating dependencies;
-- setting up and deploying the Antora toolchains and documentation to the project website;
-- working on supporting libraries; and
-- fixing bugs.
-
-These are some of the highlights of the work done in the last quarter:
-
-- Refactor library layout and scripts so all the implementation files are in [`src`](https://github.com/cppalliance/mrdocs/commit/04f75ddbeb666a65a3a8604b5cfaead1977e8c1c)
-- [Automated deployment](https://github.com/cppalliance/mrdocs/commit/5bd2cc494d82e10189041f138efc6a9abf3bd55e) of demo documentation to the project website. The website is updated on every commit to the `develop` or `master` branches. The action uses the actual release package to generate the documentation. This PR involved fixing all scripts and directory layouts for the release package to be properly usable. The new project layout uses the FHS standard. CMake modules to create a target to generate the documentation for projects were included in the installation. The MrDocs executable and libraries are installed as separate CMake package components. Any usage of FetchContent has been removed from the CMake scripts. The documentation was refactored to reflect the changes. Included complete instructions in the documentation pages for installation and usage, describing all commands and options.
-- Deployed [new LLVM binaries to the website](https://mrdox.com/llvm+clang/) and [updated CI](https://github.com/cppalliance/mrdocs/commit/2584328ab143d50d4d8289ac83f019f622200fa4). The binaries were regenerated for all platforms in 4 modes: `Debug`, `Release`, `RelWithDebInfo`, and `DebWithOpt`. This involved fixing long-standing bugs related to conflicts in LLVM versions used by the project and providing new pre-built binaries on the website. In particular, the previous pre-built binaries used a special ReleaseWithDebInfo LLVM configuration that caused conflicts with MrDocs on MSVC, being used with a Debug CMake configuration variant by developers. This eliminated the need for the ad-hoc GitHub LLVM binaries release and for the special docker container we had been using so far.
-- Added [support for CMakePresets](https://github.com/cppalliance/mrdocs/commit/ba63ed0f8cab4846dbff468b8d1f24d14f5d22c8). This allowed us to simplify the build process for MrDocs and its dependencies, which was previously counting on long command line commands and `CMakeUserPresets.json` files without a corresponding `CMakePresets.json` file. It was also a step towards the new installation instructions in the documentation. An example file for `CMakeUserPresets.json` including all compilers was provided. The base `CMakePresets.json` file included a special vendor configuration to hide base configurations from Visual Studio. `CMakeSettings.json` was deprecated.
-- Added complete [installation instructions](https://github.com/cppalliance/mrdocs/commit/34912248fbbd006b163c6bd438e30ff52efc4fac) for the project. The instructions were adapted so that all commands are relative to an enclosing directory containing all the dependencies and MrDocs. Included instructions for all steps considering package managers, installed dependencies, or binaries provided on the project website. The CMake scripts were adapted to make it easier to build the project in a variety of environments according to the instructions.
-- Included a [polyfill implementation of `std::expected`](https://github.com/cppalliance/mrdocs/commit/2e554c8b636f31815fb80656717e910e097fbb77) as `mrdocs::Expected`. This implementation is currently being used by MrDocs and support libraries.
-- [Refactored MrDocs generators](https://github.com/cppalliance/mrdocs/commit/63ac382438b6fa78041210f67f0736d0977a924b) to use our custom [C++ implementation](#handlebars) of the Handlebars template engine. Javascript helpers are loaded with [duktape](https://duktape.org/) with our [Javascript Bindings](#javascript-bindings) and compiled into functions in the Javascript engine.
-- Refactored the project name from MrDox to [MrDocs](https://github.com/cppalliance/mrdocs/commit/12c027f4f1b449570ae58b601634b29f5fdbfd3f)
-
-### Handlebars
-
-MrDocs includes a support library that reimplements the [Handlebars](https://handlebarsjs.com/) template engine in C++. This module is used to generate documentation from templates.
-
-Over the last quarter, this is the MrDocs support library in which I have been investing most of my time. All the development started in this same quarter in July. It already supports all features from the original Handlebars test suite, including all mustache features. The library is already integrated with MrDocs to generate the documentation for the project website.
-
-- [Initial proposal](https://github.com/cppalliance/mrdocs/commit/81a5b886d09999a0cd36e983349515e5d0ae6d27) of the C++ Handlebars library.
-- Fixed and refactored code that relied on [references to temporaries](https://github.com/cppalliance/mrdocs/commit/353fe987825023c9a886411c76798e93e27adabb) generated by the Dom.
-- Included support for [inverse blocks without helpers](https://github.com/cppalliance/mrdocs/commit/68491d0ee8a9d13088e2f0f96bd4aad6cfc78435)
-- All features, specs, and tests from the original Handlebars test suite were then ported to C++ and are now passing: [basic specs](https://github.com/cppalliance/mrdocs/commit/74fd1f357543e3097e58e1a1e5ed3992c918402b); [partials and automatic indentation](https://github.com/cppalliance/mrdocs/commit/5a0409a032d99c2efef374f74f3e2cb7fc80d49c); [whitespace control](https://github.com/cppalliance/mrdocs/commit/7e5250ea35e5e5e30175d5cc533337739f3d46f9); [block helpers, mustache blocks, and chained blocks](https://github.com/cppalliance/mrdocs/commit/95210f86884ff831d0a6a7d7f22aee05b6ec281b); [subexpressions](https://github.com/cppalliance/mrdocs/commit/f3e686136212ade78c2b0573bb2e81b9edbc01bf); [builtin helpers](https://github.com/cppalliance/mrdocs/commit/7510558dce3950059470d3ec9f11870f6b7354e2); [private data](https://github.com/cppalliance/mrdocs/commit/c1223af5f866351821f3dd69fa4ed7f0d6deb9a8); [helper formats](https://github.com/cppalliance/mrdocs/commit/8f674d5bb220f24539ec46430cae234ebeb832ee); [track-ids mode](https://github.com/cppalliance/mrdocs/commit/517d4d41e0456f134f79710326f2b05c0c213267); [strict mode](https://github.com/cppalliance/mrdocs/commit/deaa47c876e50e5018ac22ede69a79f35a611fa4); [util](https://github.com/cppalliance/mrdocs/commit/d10a92142e7420d7af4656048e548ee314fd9ff9); [mustache](https://github.com/cppalliance/mrdocs/commit/f63df18387c3d2461ba8d2ae7dbe9b103cc69a10). As many handlebars features were undocumented in the original implementation, adjusting our handlebars implementation, which was only designed to handle basic templates, to pass all tests from the original Handlebars test suite involved multiple significant refactors and improvements to the entire codebase. The tests are a superset of the mustache specs, which are also passing. The previously available [SafeString](https://github.com/cppalliance/mrdocs/commit/2f9fe70c30b0d8deeaf4bb104258802b6aa1f138) type because a regular `dom::Value` with the implementation of the specs.
-- Support for [`dom::Function`](https://github.com/cppalliance/mrdocs/commit/93a1bf991e4d754fdadb77dd3f1d175dfb77f60f) in all components of the Handlebars engine instead of custom callback types. This also allows the engine context to contain functions that can work similarly to helpers. The engine callback object is passed as the last argument in a helper, similar to the Javascript implementation. Because most built-in helpers support variable arguments, a new `dom::Function` implementation type was provided to support this use case.
-- Support for [error handling via `Expected`](https://github.com/cppalliance/mrdocs/commit/2e554c8b636f31815fb80656717e910e097fbb77). All functions that might fail have a variant that throws exceptions and another that returns an `Expected` value. Helpers functions are also allowed to propagate errors via `mrdocs::Error`.
-- Fixed a [bug](https://github.com/cppalliance/mrdocs/commit/bf64028b4ec2cdb6cef34e53259fdd99bf199a98) that caused MrDocs to emit `[object Object]` for `{{'\n'}}` after the [transition to C++ handlebars](https://github.com/cppalliance/mrdocs/commit/63ac382438b6fa78041210f67f0736d0977a924b).
-- The "find" helper was adjusted to [support strings, arrays, and objects](https://github.com/cppalliance/mrdocs/commit/60d79ff116a4eb7532aef218860e48818c2b95e2). This fixes a problem with the variants of similar but conflicting helpers that were categorized as container and string helpers.
-
-### The DOM
-
-MrDocs also includes a support library called "DOM" that provides a C++ interface to type-erased property trees, such as JSON and Javascript Objects. This module is used by MrDocs to create opaque representations of property trees that can be used by the Handlebars engine and other library functions. Such representations can come from a variety of sources, including JSON files, Javascript objects, and internal C++ objects with information parsed by MrDocs.
-
-After completing the Handlebars implementation, I also included [complete unit tests](https://github.com/cppalliance/mrdocs/commit/cc9e397f25e9f969e0569a79327754bd2e6b26fa) for the DOM. Tests were included for all DOM types and many bugs have been fixed.
-
-All classes have been documented to reflect their intended behavior, which is loosely modeled after JavaScript types and data structures. The APIs have also been adjusted to be safer and more consistent with the model for reference types. Objects and Value types received functions for nested object lookup and objects were generalized to support non-enumerable properties and Javascript bindings.
-
-### Javascript Bindings
-
-MrDocs includes a support library that wraps [duktape](https://duktape.org/) to provide a C++ interface to the Javascript engine. This module is used by the MrDocs executable to evaluate user-defined helpers.
-
-I also included [unit tests](https://github.com/cppalliance/mrdocs/commit/d5b7b3d1bf983cde57619314e49681e3c73c1a02) for Javascript wrapper and bindings.
-
-Throughout the process,
-
-- the implementation was completed for classes that were placeholders and existing bugs have been fixed
-- the API was documented
-- The javascript `Scope` object was extended to support all types of alternative syntax to evaluate expressions.
-- Value types: support for integers and floating point numbers.
-- Value types: support for all dom::Value operations using the native `duktape` functions
-- Provided classes to wrap javascript Objects, Arrays, and Functions as [DOM](#the-dom) values
-- `Scope` functions that might fail were adjusted to return `Expected` values
-
-### Unit Tests
-
-MrDocs includes a support library for unit tests. The library was initially adapted from the Boost.URL unit tests and extended to support the needs of MrDocs.
-
-I had previously implemented a smaller system for the Handlebars unit tests which was then [integrated with the boost.url test suite library](https://github.com/cppalliance/mrdocs/commit/e14fe087c2ecb7884f0af21b94ce34414506b3ef). Features from the handlebars test suite library were ported to the boost.url test suite library, including the expression decomposer and the diff algorithm for golden master tests.
-
-With this integration, Handlebars tests were then listed among any other tests in the library. These tests were later complemented with regular MrDocs tests.
-
-The decomposer has later been improved for [integral comparison operators](https://github.com/cppalliance/mrdocs/commit/831a691957de8266788dd42b3a4c1116c8f46505).
-
-## Boost Website
-
-In this last quarter, the Boost website went beta on https://www.preview.boost.org/. Among the many support projects for the website, I've been helping the most on the [`cppalliance/site-docs`](https://github.com/cppalliance/site-docs), which includes the Boost website documentation as an Antora project. Its components represent the "User Guide", "Contributor Guide", and "Formal Review" sections of the website.
-
-Since the inception of the project, I've been overseeing and reviewing all the work done by the other contributors to the project. I've also been responsible for:
-
-- setting up and maintaining CI for the project;
-- coordinating with [`cppalliance/temp-site`](https://github.com/cppalliance/temp-site) on content uploaded to AWS buckets;
-- build scripts to be reused by the release tools and previews;
-- writing sections of the documentation that require technical knowledge;
-- developing custom Boost/Antora extensions, such as the Boost Macro extension;
-- maintaining the Antora toolchain and templates; and
-- adjusted Boost libraries to match formats expected by the website.
-
-The Antora [playbooks were recently adjusted](https://github.com/cppalliance/site-docs/commit/aefae2a6062cc19a731e007bc28c275180e290fd) to initially contain no content sources, now that the Antora-enabled build process also implemented by me was deployed in the official Boost release process.
-
-## Boost Libraries
-
-As in other quarters, the Boost Library in which I have been investing most of my time is [Boost.URL](https://github.com/boostorg/url), since it's our most recently accepted library. The library is in maintenance mode since our focus shifted to MrDocs, but considering how recent it is, there is a constant demand for work fixing bugs and improving the documentation. In Boost.URL, I've been responsible for:
-
-- upgrading CI, mostly coordinating with the [C++ Github Actions](#c-github-actions);
-- maintaining, simplifying, and updating build scripts;
-- integrating more spec tests, such as the Ada tests included more recently;
-- including more examples, such as the more recent sanitize-URL example;
-- fixing documentation content that is out of date; and
-- fixing bugs.
-
-Besides bugs, the library was recently refactored to remove what was previously called a "header-only" mode and deprecated several aliases, which caused some small friction in this last quarter. These are some of the highlights of the work done in the last quarter:
-
-- Extended [fuzz testing](https://github.com/boostorg/url/commit/516e0093c55271a6ec9b9f271292fc29bcd586cd). `Fuzz` was included as a new clang factor in CI. The process was adjusted so that the corpus is properly reused from and stored in GitHub action archives. CMake scripts were refactored to include CMake options that control the fuzzer parameters. Fuzzers were included for each of the grammar rules for URLs.
-- [Support `IP-literal` as `IPv6addrz`](https://github.com/boostorg/url/commit/f2bb191b902ab63fa2207c64cfe273bd516a719d). This is an [issue](https://github.com/boostorg/url/issues/711) where a valid `IPv6addrz` wasn't being considered an `IP-literal`. IPv6addrz includes a `ZoneID` at the end, delimited by an encoded `"%25"`. The `ipv6_address` class is unmodified, as the mapping from the `ZoneID` to a `std::uint32_t` is dependent on the application context. The original `ZoneID` can be obtained from the url_view but the library is agnostic about it.
-- Included [GDB pretty printers](https://github.com/boostorg/url/commit/f3fe229c9d349d06083f9cdf1ae163b84b1ad1d8) and documentation. All available URL components are now pretty printed in GDB. A developer mode was also included which prints the URL components in a format corresponding to the internal URL string offsets.
-- Updated the [content of both the documentation and README.adoc](https://github.com/boostorg/url/commit/7e47e9fef6fecce45f7c65277601b7e7ff38c365) so that they match current best practices. The documentation in README.md contained dated and incorrect information, while the quickbook documentation was missing important information and contained bad practices.
-
-Some relevant bug fixes were:
-
-- Enforced that appropriate CMake BOOST_INCLUDE_LIBRARIES are set [according to the build options](https://github.com/boostorg/url/commit/d0746ebf941230d0c8a535859da2a0f7e6a747ca). The previous implementation included these extra libraries whenever they were available, which caused problems for other libraries that depended on Boost.URL via `depinst.py`.
-- Fix and include unit-tests for issues [#755](https://github.com/boostorg/url/issues/755) and [#446](https://github.com/boostorg/url/issues/446)
-- We [disabled](https://github.com/boostorg/url/commit/20ab896ffede3c4ac9cbfb6740e6e97f321ccd87) and [re-enabled](https://github.com/boostorg/url/commit/cac7c200e28e3559d4ea9ea43033bda5e8f66c39) drone caching. This is related to a bug where Drone would attempt to cache the `b2` binary, which would cause conflicts. This was fixed with [PR #123 in boostorg/boost-ci](https://github.com/boostorg/boost-ci/pull/213).
-- Remove quickbook [references to variant](https://github.com/boostorg/url/commit/b156eb230193e5f2d79980812106872f2d71c535). Links and references to `variant` in both the `.qbk` and `.xml` files were removed as the `variant` alias had been deprecated in [96438f6](https://github.com/boostorg/url/commit/96438f683e09e20183fab1b6059fa7f1b0ffe67d).
-- Updated [javadoc deprecated references](https://github.com/boostorg/url/commit/705554ca127cc1deb5d66efcdbd16cc593e31950). This fixed a mistake where the Javadoc for many deprecated aliases included references to the deprecated alias `boost::core::string_view` instead of the correct deprecated aliases `boost::optional`, `system::error_category`, `system::error_code`, `system::error_condition`, `system::system_error`, and `system::result`.
-- Replaced `@ref` prefixes [with backtick for references](https://github.com/boostorg/url/commit/3db1407cc9d792c1e192b401d31c2c12f607ec25) in javadocs. This caused an issue in the documentation since several aliases were deprecated in favor of symbols from Boost.Core.
-- [Refactored variable name in url::set_params](https://github.com/boostorg/url/commit/a1181275d02a0a4c6ab8147354f752ec36e1dd98). This issue was causing an error in the documentation for `url_base::set_params`.
-- [Fix bug](https://github.com/boostorg/url/commit/0ca58467a472c72e84405a7d991201f58ffdf327) where `url_view`/`string_view` constructor would require a non-`url_view_base` as input. This [caused the constructor to reparse the `string_view`](https://github.com/boostorg/url/issues/756) from an already parsed `url_view_base`.
-- [Fix bug](https://github.com/boostorg/url/commit/c97bc2782cdd9b343ede1492863a672805c255cd) where `parse_query` would recreate `std::string_view`s. This would make the query [include values in the underlying std::string beyond the expected string_view](https://github.com/boostorg/url/issues/757).
-- Updated [changelog](https://github.com/boostorg/website/commit/8c39fd223c5c9f74f0d70d611c35360415d862da) for boost release.
-
-Besides Boost.URL, as usual, I've been overseeing and fixing smaller issues with other boost libraries, such as Boost.Docca, Boost.StaticString, and helping with libraries by other contributors when asked for assistance, as in a more recent case with Boost.Outcome.
-
-In particular, we had to fix a smaller issue in Boost.Docca that was also affecting Boost.URL. The issue involved Boost.Docca's dependence on a deprecated version of Doxygen that is no longer supported by the Boost toolchain.
-
-## Boost Release Tools
-
-Over the last quarter, I've been working on the integration of toolchains I developed into the Boost Release Tools to add support for features desired for the new website.
-
-Some of the highlights of the work done in the last quarter:
-
-- Introduced support for libraries with Antora documentation [into the official Boost release process](https://github.com/boostorg/release-tools/commit/66670dfcf4d1ac69a963aa74cd9c06ffade73d58). Deployed new docker containers that include NodeJS, Gulp, and the Antora toolchain. With this enhancement, each library now can function as an Antora component within an Antora master project hosted in a separate repository (https://github.com/cppalliance/site-docs). This master project repository also contains additional components, such as the user guide, contributor guide, and a dedicated component for the review process. In a subsequent phase of the release process, this Antora documentation is seamlessly merged with the pre-existing in-source documentation, which has been generated using various other tools. When a library is "Antora-enabled", the release process will automatically generate the Antora documentation and publish it with the documentation of other libraries. No `b2` scripts are required to generate the documentation for the library. All Antora-enabled libraries use the same master Antora UI template that matches the design of the boost website. Antora [playbooks were adjusted](https://github.com/cppalliance/site-docs/commit/aefae2a6062cc19a731e007bc28c275180e290fd) to initially contain no content sources, now that the Antora-enabled build process in deployed in the official Boost release process.
-- Deployed new container [to `boostorg/boost`](https://github.com/boostorg/boost/commit/30f0ef1de2d8f205502d2a557ee0c9cb5a3b4708) to support the new release process.
-- New [archive variants](https://github.com/boostorg/release-tools/pull/52) for boost. Add extra archive variants such as `boost-docs` and `boost-source`. These variants can reduce expenses with JFrog download bandwidth, provide users with archives that are simpler to use, and provide docs-only archives for the website. The new MakeBoostDistro.py script includes parameters to determine what types of files should be included in the distribution. All other functions are adapted to handle these requirements accordingly. Switching to source-only downloads would save Boost $1000 per month.
-
-## C++ Github Actions
-
-[C++ Github Actions](https://github.com/alandefreitas/cpp-actions) is a project I created and have been maintaining since the second quarter of the year. It is a collection of reusable Github Actions for any C++ project that needs to be tested on a variety of compilers. Both MrDocs and Boost.URL are currently using these actions in their CI.
-
-The project includes actions to:
-
-- Generate a Github Actions Matrix for C++ projects;
-- Setup C++ compilers;
-- Install and setup packages;
-- Clone Boost modules;
-- Run complete CMake and `b2` workflows;
-- Generate changelogs from conventional commits;
-- Generate summaries; and
-- Generate time-trace reports and flame graphs
-
-These actions include a myriad of options and features.
-
-- The "setup-*" actions include logic to detect, install, and cache dependencies, which can be used by the CMake and `b2` actions.
-- Individual options and actions attempt to set up a wide variety of compilers on different platforms, including MSVC, GCC, Clang, MinGW, AppleClang, and Clang-CL.
-- Actions that generate reports include a multitude of tools and options to analyze changes, time traces, and coverage.
-
-Since then, these actions have been adapted as needed to support the needs of MrDocs and Boost.URL, which have also been using conventional commits. Here's a recent summary report generated for Boost.URL by the CI workflow: https://github.com/boostorg/url/actions/runs/6512424067
-
-The project documentation also uses the Antora UI template we have been maintaining for all other projects: https://alandefreitas.github.io/cpp-actions
-
-## Gray-box local search
-
-On 13 September 2023, the [following paper](https://link.springer.com/article/10.1007/s00500-023-09129-1) I co-authored was published:
-
-```
-Lopes, R.A., Freitas, A.R.R.
-Gray-box local search with groups of step sizes.
-Soft Computing.
-p. 1-14
-2023
-https://doi.org/10.1007/s00500-023-09129-1
-```
-
-The paper was accepted on 18 August 2023 and published on 13 September 2023.
-
-While the paper is more aligned with my educational background than my daily C++ Alliance tasks, it is one more paper that carries the C++ Alliance in the affiliation, contributing to its reputation.
diff --git a/_posts/2023-10-27-Christians2023Q3Update.md b/_posts/2023-10-27-Christians2023Q3Update.md
deleted file mode 100644
index 99f5061ef..000000000
--- a/_posts/2023-10-27-Christians2023Q3Update.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: christian
-title: Christian's Q3 2023 Update
-author-id: christian
-author-name: Christian Mazakas
----
-
-Development on Unordered has been proceeding smoothly. Now that Boost libraries are permitted to drop C++03 support, Unordered has seen some long-needed cleanup. Unordered was able to drop dependencies on Tuple and TypeTraits which means packagers like vcpkg can now create smaller downloads for users.
-
-In addition to this, a long-standing issue in Unordered was also fixed. We had committed the cardinal sin of storing raw pointers in the internals of the open-addressing containers. To this end, support for Allocators was only partially complete. With the latest Boost release, users will now be able to use Allocators which use fancy pointers such as those found in Boost.Interprocess with `boost::unordered_flat_map`.
-
-
-
diff --git a/_posts/2023-10-27-FernandoQ3Update.md b/_posts/2023-10-27-FernandoQ3Update.md
deleted file mode 100644
index 6e9bd3bc2..000000000
--- a/_posts/2023-10-27-FernandoQ3Update.md
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: fernando
-title: Fernando's Q3 Update
-author-id: fernando
----
-
-I'd like to share with you some of the initiatives and projects I've had the pleasure of collaborating on over the past few months.
-
-# Exploring Projects
-
-Engaged in detailed exploration of the following projects, diving deep into their codebases and documentation to gain comprehensive insights:
-
-- Boost Asio
-- Boost Beast
-- Boost URL
-- Boost Requests (not in Boost yet, by Klemens)
-
-It was a journey through these projects' philosophy, design choices, and intricacies.
-
-# Recent Adventures
-
-- Took a deep dive into Asio's new features, aiming for a more comprehensive grasp.
-- Collaborated with Klemens on tests for Boost Requests.
-- Got a close look at MrDox, understanding its structure and inner workings.
-- Appointed as the maintainer of Boost Beast and managed its latest release.
-- Mohammad has stepped in as the maintainer of Boost Beast, with my oversight. I'm ensuring everything is primed for the Boost 1.84 release.
-- Started to immerse myself in Boost's modularization efforts, getting familiar with b2 and the latest tweaks by René.
-
-# Constexpr Tools for Advanced Language Processing:
-
-I've ventured into the intriguing world of constexpr lexers, parsers, code generators, specifically targeting URLs and niche programming languages. The vision I pursued was to develop generic tools for constexpr lexing and parsing as an EDSL (Embedded Domain Specific Language). Moreover, I aimed to specialize in compiling a language similar to Array (like BQN, UIUA, ...) and generating code during compile time. The allure behind this approach was to leverage the natural affinity these languages have with vectorization (SIMD).
-
-While I find this exploration deeply enthralling and see its immense potential, I've encountered challenges in pitching the idea. Locating concrete examples of its real-world application proved elusive, making it seem more like a research and lab endeavor rather than a tangible solution.
-
-# Envisioning Boost's Modular Future
-
-One area that truly excites me is the modularization of Boost. Boost currently uses two build tools: CMake and b2. With b2 evolving to embrace a modular Boost, it's thrilling to think of a future where users can seamlessly integrate any Boost library and its dependencies. Python, JavaScript, and Rust developers have this luxury. Why not C++? Boost's modularization could be a game-changer in making C++ more welcoming to new and seasoned developers alike.
-
-# Boost Asio: Making Networking Approachable
-
-Klemens came up with a brilliant idea: a tutorial or guidebook for Boost Asio. I think it's a fantastic initiative and I'd love to chip in and help out where I can.
-
-Boost Asio is a standout within Boost for its networking and low-level I/O capabilities. Yet, its complexities can sometimes be daunting for those new to it. This guidebook is imagined to bridge that gap, aiming to make Boost Asio more accessible and empowering newcomers to tap into its full potential.
diff --git a/_posts/2023-10-27-Joaquins2023Q3Update.md b/_posts/2023-10-27-Joaquins2023Q3Update.md
deleted file mode 100644
index c4836c2f0..000000000
--- a/_posts/2023-10-27-Joaquins2023Q3Update.md
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: joaquin
-title: Joaquín's Q3 2023 Update
-author-id: joaquin
-author-name: Joaquín M López Muñoz
----
-
-During Q3 2023, I've been working (mostly in collaboration with Chris) in the following areas:
-
-### Boost.Unordered
-
-* Shipped [Boost.Unordered 1.83](https://www.boost.org/doc/libs/1_83_0/libs/unordered/doc/html/unordered.html#changes_release_1_83_0_major_update).
-* I've written the article ["Inside boost::concurrent_flat_map"](https://bannalia.blogspot.com/2023/07/inside-boostconcurrentflatmap.html)
-explaining the internal data structure and algorithms of this new container.
-* I developed an improvement to concurrent containers for visit operations (branch [`feature/optimized_visit_access`](https://github.com/boostorg/unordered/tree/feature/optimized_visit_access))
-that takes advantage of const-accepting visitation functions even when passed to a non-const
-`visit` overload. For instance, the following:
-`m.visit(k, [](const auto& x){ res += x.second; });`
-uses non-const `visit` even though the visitation function does not modify `x`. The optimization
-detects this circumstance and grants the visitation function _shared_ rather than
-exclusive access to `x`, which potentially increases performance. At the end, we
-decided not to include this optimization as it's hard to communicate to the
-user and was deemed too smart --the simple alternative is to use `cvisit`.
-* Added `[c]visit_while` operations to concurrent containers, with serial and parallel variants
-(target Boost 1.84).
-* Added debug-mode mechanisms for detecting illegal reentrancies into a concurrent
-container from user code (target Boost 1.84).
-* Added efficient move construction of `boost::unordered_flat_(map|set)` from
-`boost::concurrent_flat_(map|set)` and vice versa (target Boost 1.84).
-* Worked on supporting [Clang Thread Safety Analysis](https://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
-(branch [`feature/clang_thread_safety_analysis`](https://github.com/boostorg/unordered/tree/feature/clang_thread_safety_analysis)).
-This work was eventually abandoned because the analysis is rather limited --for instance,
-it's strictly [intraprocedural](https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#no-inlining)
-and can't detect issues with user-provided visitation functions.
-* Supported Chris in the addition of support for fancy pointers to open-addressing and
-concurrent containers. This enables scenarios like the use of Boost.Interprocess allocators
-to construct containers in shared memory (target Boost 1.84).
-* Added Boost.Serialization support to all containers and their (non-local)
-iterator types (target Boost 1.84).
-* Solved issue [#205](https://github.com/boostorg/unordered/issues/205).
-* Added `boost::concurrent_flat_set` (target Boost 1.84).
-* Reviewed PR [#215](https://github.com/boostorg/unordered/pull/215) for
-code clean-up after upgrading from C++03 to C++11 as our new minimum
-for closed-addressing containers.
-
-### CppCon 2023
-
-* With the help and feedback from many, I've prepared a poster and a brochure
-to help publicize the latest developments of Boost at this event.
diff --git a/_posts/2023-10-27-KlemensQ3Update.md b/_posts/2023-10-27-KlemensQ3Update.md
deleted file mode 100644
index d0d90cb2b..000000000
--- a/_posts/2023-10-27-KlemensQ3Update.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: q3_update
-title: Klemens Q3 Update
-author-id: klemens
----
-
-Over the last few months I have been mainly working on boost.async and getting it ready for review - twice.
-It had two review periods (August, with too low a turnout) and one in late September which it passed conditionally.
-
-The time between both review periods gave me time to improve the library based on the feedback of the first.
-
-One of the conditions was a new name, so it's `cobalt` instead of `async` now and will be included in boost.
-
-Additionally, I've been busy with training colleagues, beast & process maintenance work and invested some time into refactoring boost.requests.
-
-
diff --git a/_posts/2023-10-27-MattsQ3Update.md b/_posts/2023-10-27-MattsQ3Update.md
deleted file mode 100644
index fc3674944..000000000
--- a/_posts/2023-10-27-MattsQ3Update.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: matt
-title: Matt's Q3 2023 Update
-author-id: matt
-author-name: Matt Borland
----
-
-Over the past few months I have primarily been working on two libraries: charconv and decimal.
-
-Charconv (https://github.com/cppalliance/charconv) is now complete.
-The library received endorsement on the Mailing List this month, and is now waiting to be scheduled for formal review.
-In the meantime feel free to test it.
-Directions for use with B2, VCPKG, and Conan are provided.
-All feedback is welcome and appreciated.
-
-Decimal (https://github.com/cppalliance/decimal) is a ground-up implementation of the IEEE 754 Decimal Floating Point types in C++14, and is co-authored by Chris Kormanyos.
-Our goal with this library is for it to be indistinguishable from the built-in types you are used to.
-This includes complete support for normal standard library functions in cmath, cstdlib, etc. as well as interoperability with Boost.Math for higher level support.
-The working basis for the library is IEEE 754-2019 and TR 24733 with our changes to modern C++.
-The library is still in early development stages, and discussion can be found on the cpp-lang slack under #boost-decimal.
-
-
diff --git a/_posts/2023-10-27-MohammadsQ3Update.md b/_posts/2023-10-27-MohammadsQ3Update.md
deleted file mode 100644
index 5354faf18..000000000
--- a/_posts/2023-10-27-MohammadsQ3Update.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: mohammad
-title: Mohammad's Q3 2023 Update
-author-id: mohammad
----
-
-During Q3 2023, I've been making progress in the following areas:
-
-### Psql
-
-[Psql](https://github.com/ashtum/psql) is a C++ PostgreSQL client based on Boost.Asio and libpq. I initiated this project with the aim of creating an easy-to-use and easy-to-maintain client-side library. In this quarter, I achieved the following milestones:
-
-- Gained a comprehensive understanding of the libpq interface and its utilization in asynchronous mode.
-- Explored various possibilities for the `connection` and `connection_pool` interfaces.
-- Implemented support for pipelined queries.
-- Created user-friendly C++ wrappers for libpq result types, simplifying the conversion between different types.
-- Introduced an interface for receiving PostgreSQL notifications.
-- Enhanced support for working with user-defined types and automatic retrieval of their Oid upon query.
-
-### Boost.Beast
-
-I have recently become more involved in the maintenance of Boost.Beast. Since Klemens is currently occupied with Boost.Cobalt and [not yet Boost].Request, I have contributed minor PRs such as [fixing the issue of using `asio::deferred` as a completion token](https://github.com/boostorg/beast/pull/2728). Additionally, my primary focus has been addressing user issues.
-
-### Search Functionality for the New Website
-
-I made some adjustments to the [boost-gecko](https://github.com/cppalliance/boost-gecko/tree/new-website) project to better align with the new website's style and adapt to dark/light mode switching.
diff --git a/_posts/2023-10-27-PeterTurcan-Q3.md b/_posts/2023-10-27-PeterTurcan-Q3.md
deleted file mode 100644
index 0ceb4b5f1..000000000
--- a/_posts/2023-10-27-PeterTurcan-Q3.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: peter
-title: Peter Turcan Q3 2023 Status
-author-id: peter
-author-name: Peter Turcan
----
-
-* Wrote a Version Control topic, including sections on the super-project, breaking changes, and filing issues, for the Contributor Guide
-* Wrote a Continuous Integration (CI) topic, with sections on the range of tools available, and including best practices and links to good examples
-* Converted the legacy About Boost doc to a new topic on Boost History, adding sections on libraries and timelines
-* Changed all references to Boost.build to B2, providing a consistent URL to link to for B2
-* Improved navigation to the Boost license from the User Guide
-* Converted legacy HTML documents for the Contributor Guide to markdown, using Pandoc then manual clean up
-* Corrected ease-of-use and accuracy issues with Getting Started with Linux
-* Corrected library naming issues (Windows vs Linux)
-* Updated the legacy Headers and Separate Compilation topics
-* Updated and clarified the section on creating New Issues
-* Removed unnecessary escape characters from several topics in the Contributor Guide
-* Wrote a topic on library Release Notes, adding best practices and links to good examples
-* Reworked the Templates documentation, compressing into one file and updating
-* Wrote a simpler introduction to Testing
-* Updated the Testing Policy topic, breaking the Testing sections into more manageable chunks
-* Reviewed the documentation for the proposed Boost.async library
diff --git a/_posts/2023-10-27-Rubens2023Q3Update.md b/_posts/2023-10-27-Rubens2023Q3Update.md
deleted file mode 100644
index c4841684e..000000000
--- a/_posts/2023-10-27-Rubens2023Q3Update.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: ruben
-title: Ruben's Q3 2023 Update
-author-id: ruben
-author-name: Rubén Pérez Hidalgo
----
-
-During Q3 2023, I've been making progress in the following areas:
-
-### BoostServerTech
-
-As you may know, [BoostServerTech](https://github.com/anarthal/servertech-chat) is a series of projects that showcase how Boost can be used
-to create high-performance web applications. It's a place to showcase Boost technology to both
-authors and users, and try new stuff.
-
-I originally created the project in July as a "test lab", to guide Boost.MySQL development.
-During this Q3, the project has advanced a lot:
-
-- Created the [project proposal](https://docs.google.com/document/d/1ZQrod1crs8EaNLLqSYIRMacwR3Rv0hC5l-gfL-jOp2M/edit).
-- Developed the proposed chat application prototype, including a React front-end and a C++ backend using Boost.Beast and Boost.Redis.
-- Made the project robust by developing a unit test suite for both front-end and back-end, and an integration test suite.
-- Deployed the project to production using AWS and containers, featuring a CloudFormation template that makes new deployments very easy.
-- Wrote extensive documentation about the project, so it can be useful for other developers, as well as other Boost authors who want to write their own ServerTech application.
-- Extended the server's functionality by adding authentication and a MySQL database (using Boost.MySQL).
-
-### Boost.Cobalt feedback and review
-
-I've been quite involved in Boost.Cobalt (former Boost.Async) submission and review process:
-
-- Provided Klemens with early feedback about documentation, prior to review.
-- I wrote a fork of the BoostServerTech chat application using Boost.Cobalt. This uncovered some issues and yielded some useful experience that the author has already incorporated into the library.
-- I wrote my review with all this.
-
-### Boost.MySQL
-
-- Implemented fuzz testing for Boost.MySQL serialization functions.
-- Fixed minor issues and released Boost.MySQL 1.83.
-- Implemented `connection::reset_session`, which allows cleaning session state and is a requirement for connection pooling.
-
-### Site docs
-
-I've done some contributions to the new Boost documentation:
-
-- I wrote the [fuzz testing guide](https://docs.cppalliance.org/contributor-guide/testing/fuzzing.html).
-- I provided some feedback about other pages.
diff --git a/_posts/2023-10-30-dmitrys-q3-update.md b/_posts/2023-10-30-dmitrys-q3-update.md
deleted file mode 100644
index 344953e41..000000000
--- a/_posts/2023-10-30-dmitrys-q3-update.md
+++ /dev/null
@@ -1,86 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: dmitry
-title: Dmitry's Q3 2023 Update
-author-id: dmitry
-author-name: Dmitry Arkhipov
----
-
-In the third quarter my work was mostly focused on improvements to JSON's
-conversion features. In Boost.JSON conversion between user types and library
-containers is done with `value_to` and `value_from` functions. Conversions of
-composite types are attempted recursively. The library provides conversions for
-several common groups of types, including sequences, associative containers,
-tuples, numbers, and strings. Users also have the option to implement
-conversion for other types. The function `value_to` can fail at runtime, when
-the structure of JSON value differs from the one expected by conversion
-implementation. The function can report those errors in two separate ways:
-exceptions and error codes. This is not only expressed in the
-front-end—available overloads for `value_to`—but is also supported on the
-back-end. In other words, users can report errors from their conversions using
-either exceptions or error codes. And the library turns error codes into
-exceptions if on the front-end exceptions were requested, and even attempts the
-opposite conversion where possible.
-
-There was a problem though, when on the front-end exceptions were requested,
-and conversion of the deepest types are using exceptions too, but higher up
-errors are reported via error codes, there is a likelihood that the exception
-will be swallowed, and then a non-discriminate "some error has occurred"
-exception will be thrown instead. In order to fix this, I implemented a
-mechanism that communicates user's choice of error reporting to the back-end.
-
-Another change to the `value_to/from` functions was the addition of
-`is_optional_like` and `is_variant_like` traits. They determine if a type can
-be classified as optional or variant correspondingly. Previously the library
-explicitly handled `std::optional` and `std::variant`, and
-support for `boost::variant2::variant` was provided in its own library. With
-these new traits all types that are sufficiently close to the standard optional
-and variant are handled. In the case of optional it has an additional benefit:
-conversion for described classes does not treat missing members as an error, if
-their types are optionals.
-
-But the biggest amount of time was spent on the new feature: direct parsing.
-Even during the Boost review of Boost.JSON some people have complained that
-they would prefer to avoid going through JSON containers entirely and parse
-directly into their types. This is now possible with the function `parse_into`.
-Benchmarks also show that it can potentially double the performance. The design
-and most of implementation was provided by Peter Dimov a while ago. So,
-I mostly only had to refactor it to reduce code duplication, and change
-behaviour of some functions, so that the result is equivalent to that of `value_to/from`.
-I also added an implementation for optionals, and provided a different
-implementation for variants.
-
-That last one deserves some explanation. Back when we were discussing different
-ways to convert to and from variants, we chose a seamless approach, where the
-variant itself doesn't add anything to representation, but its current
-alternative is represented directly. This is what most JSON files use in
-practice, but it does complicate conversion back from JSON into variant.
-`value_to` attempts conversions for each alternative and picks the first one
-which succeeds. This approach is not immediately possible for direct parsing,
-though, as the choice of alternative has to be made before the full source of
-the value is available. Peter's solution was to only support variants for
-which it was easy to discriminate between alternatives. This does eliminate
-whole classes of JSON documents, though, in which variants of very similar
-alternatives (usually objects) are used. So, I've dealt with variants
-differently: the implementation instead records parser events and replays them
-for the next alternative if the current one fails.
-
-That approach has a consequence: for variants we need to dynamically allocate
-and keep a sequence of events. This can potentially eliminate all performance
-benefits, if the variant is the topmost container. To mitigate this I also
-implemented a way to limit the amount of parser events variant conversion can
-replay. For some variants a fairly small limit would be enough and completely
-eliminate the need for additional allocations. Ultimately, I decided not to
-merge that customisation, and wait for user feedback.
-
-Finally, there was one notable change to Boost.JSON unrelated to conversion.
-Different C++ implementations disagreed whether `value jv{ value() }`
-copy-constructs `jv` or uses construction from `initializer_list`. This
-resulted in code that behaves differently for different implementations.
-Unfortunately we couldn't fix it on our end, this is just a discrepancy between
-how implementations treat this syntax. But eventually I came to the realisation
-that it can be handled explicitly to be a copy by the `initializer_list`
-constructor. The constructor now treats `initializer_list`s of size 1 as an
-attempt to copy. If you do want an array of size 1, you can use `value jv{
-array{x} }`.
diff --git a/_posts/2023-10-31-KrystianQ3Update.md b/_posts/2023-10-31-KrystianQ3Update.md
deleted file mode 100644
index 582a5d571..000000000
--- a/_posts/2023-10-31-KrystianQ3Update.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: krystian
-title: Krystian's Q3 Update
-author-id: krystian
----
-
-My primary focus this quarter was getting MrDocs into a state where it can serve as a drop-in replacement for Doxygen/Docca in Boost.URL. Before diving into that, there are a few smaller things I addressed in other projects:
-
-## Boost.StaticString
-* Added support for platforms lacking `wchar_t`/`wsnprintf`
-
-## Docca
-* Added backward compatibility for operator names. Doxygen 1.8.15 and older generate operator names containing a space between `operator` and the subsequent tokens. This behavior changed in newer versions, meaning that the new names must be converted to the old format to avoid breaking existing references to these functions.
-* Suppressed generation of private friends. This was necessary because such declarations would "hide" the primary declaration and result in broken links.
-* Stripped auto-generated links within code blocks due to incorrect rendering.
-
-## MrDocs
-Switching focus to MrDocs, I implemented many major features:
-
-### Dependency extraction
-When symbols are referenced by a declaration, dependency extraction controls whether the referenced symbol will be extracted, irrespective of whether it was declared within the project directory. My initial naive implementation would extract such symbols unconditionally, but I later added a more refined mode where dependency extraction only occurs for:
-* Local classes which are deduced as the return type of an extracted function, and
-* Base classes of an extracted class
-These cases are the only ones in which a referenced symbol affects the "interface" of another, hence the term "dependency." A final mode that disables dependency extraction completely was also added.
-
-### Safe names
-The "safe name" of a symbol is a prettier but unique name for a symbol that can be used as an alternative to the base16/base64 representation of a `SymbolID`. These names also have the property of being path/URL safe, as their intended purpose is for use as filenames when generating the output.
-Broadly, safe names are generated by collecting all symbols with the same name in a given scope, and then appending digits from the base16 representation of the `SymbolID` until all names are unique. For example, the safe name for `void A::f();` will be `A-f` in the absence of other overloads. If there exists an overload `void A::f(int);`, then a possible set of safe names could be `A-f-0a` and `A-f-04`.
-
-### Symbol filtering
-Symbol filtering permits the exclusion of symbols matching a pattern from being extracted. Filters are specified as C++ _id-expressions_, except that wildcards (`*`) may be used to match zero or more occurrences of any character. The primary purpose of filters is to exclude symbols from detail namespaces (e.g., using the pattern `*::detail`). In addition to excluded patterns, it is also possible to specify included patterns to override matches; these patterns are meaningless unless they match a subset of symbols matched by an excluded pattern. For example, the excluded pattern `A::B` combined with the included pattern `A::B::f*` means only the symbols in `A::B` beginning with `f` are to be extracted. Internally, filters are converted into a tree that is traversed alongside the AST; this avoids the need to check every pattern each time a new symbol is extracted.
-
-### Symbol lookup
-Symbol lookup is the mechanism by which the `@ref` and `@copydoc` commands are implemented; it performs a simplified version of C++ name lookup for the given _id-expression_ within the set of all extracted symbols. The current implementation is far from complete (e.g., no ambiguity resolution is performed, and the semantics of constructs like inline namespaces, using declarations, using directives, and injected-class-names are not implemented), but it's sufficient for Boost.URL's documentation. Lookup is deferred until all symbols have been extracted to support cross-TU references without forward declarations.
-
-## Clang
-
-The backbone of MrDocs is the clang compiler, which, given the nature of software, is not without its bugs. Working around them is only feasible to a certain extent, meaning that at some point it becomes necessary to fix them instead of waiting for others to do so. To address this, I have spent considerable time this quarter getting comfortable with hacking clang and familiarizing myself with the process of merging patches into LLVM. Thus far, I have submitted [one PR that has been merged](https://github.com/llvm/llvm-project/pull/66636) which eliminates the `ClassScopeFunctionSpecializationDecl` AST node in favor of using `DependentFunctionTemplateSpecializationInfo` to represent dependent class scope explicit specializations of function templates. The primary motivation for this patch was to simplify `ASTVisitor::traverse` in MrDocs by using the same overload to handle all function declaration nodes. However, this patch also improves diagnostics for the following example, insofar that the lack of a primary template will be diagnosed prior to instantiation:
-```cpp
-template
-struct A
-{
- template<>
- void f(int);
-};
-```
-
-I have also been working on patches for other bugs related to function template specializations, e.g., diagnosing friend function template specializations which are definitions, ensuring that lookup for friend function template specializations considers inline namespaces, diagnosing unexpanded packs in class scope function template specializations, etc.
-
-Another related aspect of explicit function template specializations I have been working on is template argument deduction. The current implementation of template argument deduction for function templates implicitly instantiates a specialization for the deduced arguments, which is undesirable (and non-conforming) when the deduction is done for the purposes of matching an explicit specialization to its primary template. I wrote a proof-of-concept implementation in which this implicit instantiation is eliminated, but I am not planning to pursue these changes until a later date when I have more time available to propose these changes.
-
-Finally, I have been working on some AST memory optimizations, namely for data common to all redeclarations of an entity. This is done by replacing `Redeclarable::First` (which stores a pointer to the first declaration in a redeclaration chain) with a pointer to a common base `Common`:
-```cpp
-struct Common
-{
- decl_type* First;
-};
-```
-Allocated by calling `decl_type::newCommon`, which permits `decl_type` to allocate a `Redeclarable::Common` derived object to store additional common data. This can, for example, be used by `CXXRecordDecl` to store a single `DefinitionData` pointer for all redeclarations, as opposed to storing it in each `CXXRecordDecl` and propagating it upon allocation. This also eliminates the need for `RedeclarableTemplate`'s common pointer, as it can be merged into `Redeclarable::Common`.
diff --git a/_posts/2023-10-31-SpencerQ3Update.md b/_posts/2023-10-31-SpencerQ3Update.md
deleted file mode 100644
index 605bc14c3..000000000
--- a/_posts/2023-10-31-SpencerQ3Update.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: q3_update
-title: Spencer's Q3 2023 Update
-author-id: spencer
-author-name: Spencer Strickland
----
-
-During Q3 2023, my work has been to get the new Boost website ready to launch. Though I've been a fullstack developer most of my career, I came in to focus more on the front-end development. I started right before Q3, so much of the site architecture was already in place. Using TailwindCSS, which I was happy about, as I'd been using it for quite a while.
-
-The bulk of my work was to flesh out the sections of the site, clean up the styling, and to provide an overall better user experience. The team that had built the site to that point (Revsys) is still on board, and that's been great - they're Django & Python specialists, while I'd only built one site using Django in 2019.
-
-The main things that jumped out at me when I started, that were keeping it from being in a launchable state, were that there were a number of pages with no content at all - and pages that did exist were inconsistent with each other in terms of the styling.
-
-### Getting started
-
-* Had a few long, fun, and very productive huddles with Vinnie early on, going section-by-section to talk about what we liked and what would look best. Our sense of what works, both from the aesthetics angle and the user experience, matches up well.
-* Reviewed the existing codebase while getting enough up-to-speed again with Django & Python.
-* Determined what was still missing from the site and, if not creating it right away, making sure there was a workable plan on how to create it when ready.
-
-### Customizing the site
-
-* Creating consistency amongst all pages of the site, including standardizing the display of similar sections across the site.
-* Addressing existing style issues / items that hadn't been looked at yet, such as text overflowing, checking that text aligns properly and consistently.
-* Making the site mobile-ready. Typically I would do this first when building a new site, and expanding from there, but either way works for me!
-* Fleshing out / getting to an almost-ready state the homepage and the Learn, Community, and Releases sections.
-* The Libraries section had had the most work done when I started, but I worked on updating the styles, fleshing out the library detail page more by including information that wasn't being displayed, and implementing the chart for Commits per Month.
-* Completely redesigned the Learn section after we had some amazing illustrations made for it.
-* Updated the homepage to be dynamic with the help of Revsys, created new elements that made sense, and brought in styling I'd worked on for the rest of the site to clean it up and bring it all together.
-* Too many tweaks to mention.
-
-### Upgrade Guide
-
-Many items are still works-in-progress, and being a website, that's usually always the case. Once we have fully launched, I would like to be able to spec out new features and sections ahead of time, as I think it makes development much easier to have everything planned and explicitly documented - though, always flexible.
-
-### Next steps
-
-Throughout the quarter, I ran into many features or elements that I've seen newer ways of handling, or that I may have done differently myself in the past. Some work that I had done, I had to make the decision to cut. Once we've launched, I am hoping for some opportunities to revisit some of those changes. I also look forward to hearing from the Boost community once they've had a chance to use the site. Once we have the solid base that we've been building ready to go, it's their opinions and the features they want that will matter most.
\ No newline at end of file
diff --git a/_posts/2024-01-10-ChristiansQ4Update.md b/_posts/2024-01-10-ChristiansQ4Update.md
deleted file mode 100644
index c13b293dd..000000000
--- a/_posts/2024-01-10-ChristiansQ4Update.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: christian
-title: Christian's Q4 2023 Update
-author-id: christian
-author-name: Christian Mazakas
----
-
-This last quarter has been an interesting one. Development on the Unordered
-library has slowed down so I've been re-tasked with aiding in the development
-of a whole new slew of HTTP/1-based libraries.
-
-The new year is a common time for reflection on where one's been and how far one
-has come. When I first started working on Unordered, I knew relatively little
-about hash tables. I was somewhat versed in C++ container design and
-implementation but in hindsight, I knew little to nothing in actuality.
-
-I've now since become an expert in library minutiae. As an example, I spent no
-less than an hour discussing the validity of allocator-constructing a
-stack-local variable as part of an optimization technique for Unordered's flat
-maps.
-
-It's been quite a privilege to essentially study C++ under a couple of world
-experts, Joaquín M López Muñoz and Peter Dimov. I'll never be able to see
-hash table design the way Joaquín does but his incredibly sharp and compact way
-of solving complex problems has forever changed how I write C++ code. On the
-other hand, Peter's helped guide and shape how I think about testing and
-actualizing it in code effectively.
-
-My newfound aptitude for testing has led to a shift in how I develop software
-going forward, something I'm calling "failure driven development". While most
-TDD workflows involve starting with a failing test case first, they don't often
-stress the importance of testing failures themselves. For example, code that
-opens a file and the file does not exist. I've applied the principles I learned
-on the job to my hobby projects and because of this, I've actually found a bug
-in the io_uring Linux kernel module. I also helped diagnose a performance
-regression as well.
-
-A principle I've learned is that you don't really understand code or a system
-until you test what kinds of errors it outputs and how it behaves under those
-conditions.
-
-I look forward to the future in helping deliver these HTTP/1 libraries as
-they're going to be dramatic improvements over the existing Beast library but
-I'll never forget what Unordered taught me.
-
-It's interesting working for a fully remote company like the Alliance because
-my coworkers are scattered all over the globe, from Spain to Bulgaria and
-beyond. Expertise is scattered all throughout the world and it's amazing how
-technology enables so much collaboration. It also enables me to hone my skills
-in slow-cooking recipes and I'm now on a quest to completely master the dish
-chile colorado.
diff --git a/_posts/2024-01-10-Fernando43Update.md b/_posts/2024-01-10-Fernando43Update.md
deleted file mode 100644
index 163e1465a..000000000
--- a/_posts/2024-01-10-Fernando43Update.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: fernando
-title: Fernando's Q4 Update
-author-id: fernando
----
-
-As the year comes to a close, I reflect on the exciting and productive final quarter of 2023. My involvement has been primarily focused on the Boost Libraries and the development of MrDocs, both of which have offered unique challenges and opportunities for growth.
-
-# Boost Modularization: Refining and Collaborating
-
-## Ongoing Boost Modularization Work:
-
-My primary focus has been on advancing the modularization of Boost. The effort is directed towards creating individual Conan packages for each Boost library, an initiative that promises to significantly enhance user experience and integration.
-
-## Strategic Discussions and Collaborative Efforts:
-
-Discussions with the Conan team have been ongoing, focusing on the practical aspects of creating modularized Conan recipes. A new repository, separate from the Conan Center Index (CCI), is in the works to facilitate these efforts with fewer restrictions. I have also been developing a web application using HTML, CSS, JavaScript, and a graph management library to assist in visualizing the dependencies of Boost libraries.
-
-# Boost Unordered: Collaborative Advancements
-
-## Responsive Action Triggered by Parlay's Performance:
-
-The impressive speed demonstrated by [Parlay](https://github.com/cmuparlay/parlayhash) set the stage for our action. It highlighted the need to enhance the performance of our concurrent data structures, particularly in scenarios involving high thread counts.
-
-## Joaquín's Innovative Ideas and My Role in Testing:
-
-Joaquín, leading the charge, conceptualized various strategies to improve Boost Unordered. His ideas were pivotal in shaping our approach towards creating more efficient concurrent data structures. My contribution primarily involved conducting extensive testing and benchmarking of these ideas on high-core-count machines. This collaboration was instrumental in validating and refining our approaches.
-
-## Learning and Contributing to Boost FlatMap and Concurrent FlatMap:
-
-Gradually delving deeper into the implementation of Boost FlatMap and Concurrent FlatMap, I am moving towards a position where I can actively contribute code and ideas. The learning curve is steep, but it is an exciting journey that promises significant contributions to the Boost ecosystem.
-
-Joaquín’s work in developing a latch-free concurrent map and a variant of `boost::concurrent_flat_map` that performs almost-latch-free insertion for SIMD-powered architectures is a testament to our team's commitment to pushing the boundaries of C++ performance. His insights and our collaborative efforts are paving the way for potential enhancements in concurrent data structures, which are fundamental to high-performance computing applications.
-
-The developments in this domain are ongoing, and we are continuously working to identify and overcome points of contention. Our aim is to not only match but exceed the performance benchmarks set by competitors like Parlay, especially in high-thread environments. This journey, though challenging, is a remarkable opportunity for innovation and growth in the field of concurrent programming.
-
-# Deepening Involvement in MrDocs
-
-## Enhancing User Experience with MrDocs:
-
-- Streamlining the Workflow:
-I've dedicated significant effort to enhancing the user experience of MrDocs. A key development is enabling MrDocs to directly obtain the compile_commands.json file by invoking CMake. This improvement alleviates the need for users to generate this file manually, thus simplifying the process.
-
-- Intelligent Inference of System's Default Include Paths:
-Another crucial enhancement is the capability of MrDocs to intelligently infer the system's default include paths from any arbitrary compile_commands.json. This is achieved by having MrDocs interact with the compiler to request information about the default include directories. These directories are then utilized in creating the "Compilation Database", making MrDocs more intuitive and efficient for various C++ projects.
-
-## Gaining Experience with Clang's LibTooling:
-
-- Deep Dive into LibTooling: As MrDocs extensively uses Clang's LibTooling, I am focusing on gaining more experience with this powerful library. This involves understanding its intricacies and exploring its capabilities in parsing and analyzing C++ code. My journey with LibTooling is not only about enhancing MrDocs but also about enriching my own skills and understanding of compiler technologies.
-
-These advancements in MrDocs represent a significant step towards making the tool more accessible and efficient for users. By reducing complexity and enhancing functionality, I am contributing to a tool that is becoming increasingly vital for C++ developers, especially in the realm of documentation and code analysis. My exploration of LLVM and Clang’s LibTooling is playing a crucial role in this endeavor, opening up new possibilities for future enhancements and innovations.
-
-# Continued Collaboration with Boost Beast
-
-While my direct involvement with Boost Beast has reduced, I continue to support the project, providing insights and assistance to Mohammad, who is doing an exceptional job as the maintainer.
-
-This quarter has been a journey of technical exploration, collaboration, and innovation. My work in modularization, performance optimization, and tool development reflects my dedication to the continuous advancement of the Boost Libraries and MrDocs. I am excited about the potential impact of these projects and look forward to contributing further to these vibrant and dynamic communities.
\ No newline at end of file
diff --git a/_posts/2024-01-10-Joaquins2023Q4Update.md b/_posts/2024-01-10-Joaquins2023Q4Update.md
deleted file mode 100644
index 9301cff6a..000000000
--- a/_posts/2024-01-10-Joaquins2023Q4Update.md
+++ /dev/null
@@ -1,78 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: joaquin
-title: Joaquín's Q4 2023 Update
-author-id: joaquin
-author-name: Joaquín M López Muñoz
----
-
-During Q4 2023, I've been working (mostly in collaboration with Chris) in the following areas:
-
-### Boost.Unordered
-
-* Implemented _bulk visitation_ for `boost::concurrent_flat_[map|set]`. In short, bulk visitation
-visits a bunch of elements at once, so instead of writing:
-
-This functionality is not provided for mere syntactic convenience: Boost.Unordered speeds up
-the entire process by pipelining the different internal stages of each individual visitation,
-which results in performance improvements of 40% and more. The article
-["Bulk visitation in `boost::concurrent_flat_map`"](https://bannalia.blogspot.com/2023/10/bulk-visitation-in-boostconcurrentflatm.html)
-discusses this new feature in much detail.
-* [Removed some unneeded using declarations](https://github.com/boostorg/unordered/pull/218) (removal of unneeded
-using declarations), contributed some [hardening code](https://github.com/boostorg/unordered/commit/dbe93c765c56cb242c99a3801828f9d506fbb658),
-[revamped the repo's README.md](https://github.com/boostorg/unordered/pull/219).
-* Shipped [Boost.Unordered 1.84](https://www.boost.org/doc/libs/1_84_0/libs/unordered/doc/html/unordered.html#changes_release_1_84_0_major_update).
-* Begun exploratory work towards adding new containers based on
-[_perfect hashing_](https://en.wikipedia.org/wiki/Perfect_hash_function). The key idea behind
-a perfect hashmap is that its elements are known in advance at initialization time, which
-allows for the construction of an ad hoc hash function guaranteeing _zero collisions_ (for
-the given set of elements). There's a tradeoff between lookup times (which can be extremely
-fast based on the zero-collision assumption) and construction times (typically much larger
-than for a classical hashmap), and moreover elements can't be inserted and deleted once
-the map is built. We have explored so far two well-known techniques from the literature for
-the generation of the associated perfect hash function:
-[Hash, Displace and Compress](https://cmph.sourceforge.net/papers/esa09.pdf) (without the compress part)
-and the algorithm from [Fredman, Komlós and Szemerédi](https://dl.acm.org/doi/pdf/10.1145/828.1884)
-(FKS), with promising results. Progress, however, has been slower than expected, so the
-target for new perfect containers in Boost.Unordered is Boost 1.86 (Aug 2024).
-* After our launch of `boost::concurrent_flat_map`, a new contender
-called [ParlayHash](https://github.com/cmuparlay/parlayhash) has arisen. ParlayHash achieves
-very good performance for massively parallel scenarios (dozens of cores) thanks to its
-smart latch-free design based on [epochs](http://csng.cs.toronto.edu/publication_files/0000/0159/jpdc07.pdf)
-for the reclamation of erased elements. The design imposes some limitations not present
-in `boost::concurrent_flat_map`, most notably that elements must be immutable, but
-its excellent performance has spurred Fernando and me to begin exploratory work towards adopting similar
-techniques in the open-addressing context we use. It's currently too early to know if this
-work will result in the addition of new concurrent containers to Boost.Unordered. As a
-spin-off of this activity, a variant of `boost::concurrent_flat_map` with
-[almost-latch-free insertion](https://github.com/boostorg/unordered/tree/feature/cfoa_alf_insert)
-has been implemented —the decision is pending whether this will be officially merged.
-
-### New website
-
-* I've contributed a small section on [tweet proposals](https://www.preview.boost.org/doc/contributor-guide/tweeting.html).
-Although the presence of Boost in social media has increased notably in the last few years,
-I think much more needs to be done, and it has to be done with contributions from the entire community.
-
-### Looking back and forward
-
-I began collaborating with the C++ Alliance almost two years ago, when I was gently hooked by
-Vinnie and Peter to work on the evolution project for Boost.Unordered alongside my colleague
-Chris Mazakas. The experience so far has been a joyous one, and I've had the opportunity
-to meet and work with a group of outstanding professionals from all over the globe.
-Braden Ganetsky recently joined the Boost.Unordered maintenance team,
-and it's been my pleasure to guide him through the onboarding process.
-
-Going forward, I feel that most of the [goals for Boost.Unordered](https://pdimov.github.io/articles/unordered_dev_plan.html)
-put forth by Peter Dimov in 2022 have been met, and it's only natural that the activity
-in this library will decrease along this year. I'm totally open to new challenges for
-the evolution of Boost, particularly if they're math-oriented and can advance the state of
-the art for C++ in general —drop me a line if you have an idea in mind!
diff --git a/_posts/2024-01-10-MattsQ4Update.md b/_posts/2024-01-10-MattsQ4Update.md
deleted file mode 100644
index fab5ccd59..000000000
--- a/_posts/2024-01-10-MattsQ4Update.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: matt
-title: Matt's Q4 2023 Update
-author-id: matt
-author-name: Matt Borland
----
-
-Over the past few months I have been working on a number of libraries both for proposal to, and currently in Boost.
-
-## New Libraries
-
-### Charconv
-
-The Charconv (https://github.com/cppalliance/charconv) review period is scheduled for 15 - 25 Jan 2024.
-Directions for use with B2, VCPKG, and Conan are provided to allow for testing, and evaluation of the library.
-All feedback is welcome and appreciated.
-Reviews can be submitted to the mailing list or the Review Manager, Chris Kormanyos at e_float@yahoo.com.
-Discussion of this library can be found on the Cpplang slack at `#boost-charconv`.
-
-### Decimal
-
-Decimal (https://github.com/cppalliance/decimal) is a ground-up implementation of the IEEE 754 Decimal Floating Point types in C++14, and is co-authored by Chris Kormanyos.
-The library has made significant progress this quarter with most of the features from IEEE 754-2019 and TR 24733 being implemented.
-Looking to next quarter we will continue to implement more features, and begin working on optimization as we have been focusing first on correctness.
-Discussion of this library can be found on the Cpplang slack at `#boost-decimal`.
-
-## Existing Libraries
-
-### Math
-
-A recent RFC in Scipy has led to the decision to begin replacing their existing Special Function implementations with C++ to enable CUDA support.
-They will be using the existing code from Boost.Math and in return contribute bugfixes and CUDA implementations as needed.
-This continues to deepen our mutually beneficial relationship with them.
-
-### Random
-
-An implementation of Vigna's Splitmix64 (https://prng.di.unimi.it/index.php) has been merged recently.
-The next step is to complete the implementation of the xoshiro / xoroshiro PRNGs.
-These new PRNGs can be faster, and have fewer BigCrush failures than the PRNGs defined in the C++ Standard.
-
-### Numeric.Odeint
-
-As of a few weeks ago Nick Thompson and I have been added as primary maintainers of Boost.Numeric.Odeint.
-Our immediate goal is to modernize the library (e.g. remove unneeded dependencies for C++03), and fix existing issues to support both the Boost and R communities.
-
-## A year in review at the C++ Alliance
-
-As of writing I have now worked at the C++ Alliance for a full year.
-This is my first job working in Open Source Software, and first remote position.
-I have thoroughly enjoyed collaborating with and meeting other developers from around the world.
-I look forward to continuing development of Boost Libraries in the future!
diff --git a/_posts/2024-01-10-MohammadsQ4Update.md b/_posts/2024-01-10-MohammadsQ4Update.md
deleted file mode 100644
index 0d76a4236..000000000
--- a/_posts/2024-01-10-MohammadsQ4Update.md
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: mohammad
-title: Mohammad's Q4 2023 Update
-author-id: mohammad
----
-
-Over the last few months I have been mainly working on Boost.Beast and Boost.PropertyTree.
-
-
-### Keeping Boost.Beast in Good Form
-
-I've recently taken on a more active role in maintaining Boost.Beast. To begin, I reviewed all the open issues to gain a better understanding of the project's current state. In the course of this process, I successfully addressed several issues that did not necessitate significant refactoring.
-
-Here are a couple of contributions to the project that I find interesting:
-
-##### Specializing `asio::associator` for `bind_wrapper` and `bind_front_wrapper`
-
-Because `bind_wrapper` and `bind_front_wrapper` wrap the user's handlers, all of the associators with the original handler become invisible to Asio. In order to resolve that, Beast has been specializing each associator individually for bound wrappers. However, this makes Asio consistently assume the presence of an associated executor with the bound handlers.
-
-Fortunately, the fix is easy; we only need to specialize `asio::associator` for the bound wrappers, and it can query all the associators from the wrapper handler. You can read more in [this pull request](https://github.com/boostorg/beast/pull/2782).
-
-##### Replacing internal uses of `beast::bind_front_handler` with `asio::prepend`
-
-`bind_front_handler` is a utility in Beast that allows binding a list of arguments to an existing handler and creating a new handler with a different signature. This is especially useful when we want to pass additional arguments to handlers. For example, the following code binds an error code to a handler, making it invokable without needing any argument:
-
-```C++
-asio::dispatch(ex, beast::bind_front_handler(std::move(handler), ec));
-```
-
-With the introduction of `asio::prepend` in Boost 1.80, we can replace the previous code with:
-
-```C++
-asio::dispatch(ex, asio::prepend(std::move(handler), ec));
-```
-
-However, `beast::bind_front_handler` has a specialized invoke function for member functions, which makes it possible to concisely turn member functions into handlers:
-
-```C++
-ws_.async_read(
- buffer_,
- beast::bind_front_handler(
- &websocket_session::on_read,
- shared_from_this()));
-```
-
-This isn't possible with `asio::prepend`; that's why I decided to leave uses of `beast::bind_front_handler` in the examples intact.
-
-
-### Trimming Dead Leaves off Boost.PropertyTree
-
-[PropertyTree](https://github.com/boostorg/property_tree) has served Boost users for almost two decades. However, it experienced a period without an active maintainer, resulting in a backlog of issues and pull requests in its repository.
-
-Given PropertyTree's shift to maintenance mode, we made the decision to refrain from introducing new features or making breaking changes to the interfaces. Consequently, I reviewed all the issues and pull requests, addressing them unless they involved adding a new feature. With these fixes implemented and an improved CI script, PropertyTree was prepared for the Boost 1.84 release, hopefully in a slightly better state.
\ No newline at end of file
diff --git a/_posts/2024-01-10-RubenQ4.md b/_posts/2024-01-10-RubenQ4.md
deleted file mode 100644
index 223b64c3c..000000000
--- a/_posts/2024-01-10-RubenQ4.md
+++ /dev/null
@@ -1,98 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: q4_update
-title: "Ruben's January update: presenting Boost.MySQL's new pool!"
-author-id: ruben
-author-name: Rubén Pérez Hidalgo
----
-
-In spite of it being winter here, Boost.MySQL is presenting a new pool this year!
-This was one of the most requested features in the library, and will finally be generally
-available as an experimental feature in Boost 1.85.
-
-Connection pools manage tasks that are easy to get wrong, like reconnections,
-health checks and session cleanup. As they reuse physical connections,
-they also provide [a nice efficiency boost](https://www.boost.org/doc/libs/master/libs/mysql/doc/html/mysql/connection_pool.html#mysql.connection_pool.benchmarks).
-
-Using a pool is as simple as:
-
-```
-boost::asio::awaitable get_num_employees(boost::mysql::connection_pool& pool)
-{
- // Get a fresh connection from the pool
- auto conn = co_await pool.async_get_connection(boost::asio::use_awaitable);
-
- // Use the connection. It will be returned to the pool on scope exit
- results result;
- co_await conn->async_execute("SELECT COUNT(*) FROM employee", result, boost::asio::use_awaitable);
- co_return result.rows().at(0).at(0).as_int64();
-}
-```
-
-Despite its simple interface, implementing `connection_pool` has been a long and hard
-task that has taken me a considerable effort. In the process, I've been able to
-learn in-depth many things about Boost.Asio which I thought I knew, but it turns out I did not.
-
-For instance, I've finally understood the Asio property system, and wrote
-[a blog post on it](https://anarthal.github.io/cppblog/asio-props.html), hoping
-that others will find it easier to understand. I've also managed to make `connection_pool`
-easy to use in multi-threaded contexts using strands. I've found many subtle pitfalls
-here that are easy to get wrong. Thread-sanitizer for the win!
-
-In any case, I've been able to get help from other knowledgeable Asio
-developers, both from inside and outside the C++ Alliance, which has been
-really helpful to get this done.
-
-I'm also pretty happy with the role that [Boost ServerTech chat](https://github.com/anarthal/servertech-chat)
-has played in this task. ServerTech chat is a project to showcase how Boost libraries can be used
-together, and a place to innovate. Thanks to it, I've had an almost real-world environment
-to battle-test my API in.
-
-## Type-erased connections
-
-With `connection_pool`, Boost.MySQL is getting a new type-erased connection
-type, `any_connection`, with much easier connection establishment semantics.
-Connection pooling relies on such semantics to provide better efficiency.
-
-`any_connection` is currently experimental, but I expect it to become the
-default connection type once it becomes stable.
-
-## Boost.Redis finally released!
-
-Boost.Redis finally got its first release with Boost 1.84. I'm proud to have
-helped the author integrate this library into Boost.
-
-Boost integration is not a trivial process. Most novice authors only have CMake
-experience, but Boost builds require some parts to use B2. Even with CMake,
-some parts need to adhere to certain conventions to integrate with the Boost
-superproject. I've recently gone through this, so I've been able to help here.
-
-I'm also pretty happy about my teammate's efforts on documenting these
-processes in the [contributor guide](https://docs.cppalliance.org/contributor-guide/index.html).
-
-## Sans-io all the things
-
-Up to Boost 1.84, all network algorithms in Boost.MySQL are internally
-implemented as layered functions calling Boost.Asio primitives. As an overly
-simplistic example, `connection::execute` calls two functions, `write_request`
-and `read_response`, which end up in socket calls.
-
-While this is the obvious way to implement such a library, it generates a lot of
-duplication. There is `connection::execute` and `connection::async_execute`,
-which yields two versions of every underlying function. It's also slow
-to compile (Asio async code is full of heavy templates) and hard to test.
-
-Starting from Boost 1.85, all network algorithms are implemented as state machines.
-Such algorithms are called sans-io, because they don't directly invoke any I/O functions.
-A thin layer of Asio code connects such algorithms to I/O, yielding the same interface
-as before. These algorithms are currently an implementation detail, and not exposed to the public.
-This change has made the library much simpler and enjoyable to test. Even if
-you're not planning to support sync functions, consider going sans-io - your unit tests will thank you.
-
-## Next steps: SQL query formatting
-
-The next big step is supporting client-side query composition. This makes use cases
-like dynamic filters much easier to implement, and can increase efficiency by
-saving round-trips to the server. I'm currently examining the great [fmt](https://github.com/fmtlib/fmt)
-library as a source of inspiration.
diff --git a/_posts/2024-01-10-SamsQ4Update.md b/_posts/2024-01-10-SamsQ4Update.md
deleted file mode 100644
index 5a8c139a8..000000000
--- a/_posts/2024-01-10-SamsQ4Update.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: sam
-title: Sam's Q4 2023 Update
-author-id: sam
----
-
-Here's an overview of some projects I have been working on the last few months.
-
-### Doc Previews
-
-A Jenkins server is building "pull request doc previews" for multiple boost repositories. If any Boost author is interested in "doc previews" let me know.
-
-Specific previews: adding an Antora version to boostorg/url.
-
-Upgraded the server packages, apps, and operating system to Ubuntu 22.04.
-
-Mr. Docs has a dedicated server for docs testing. Debugged CI deployment issues there.
-
-### JSON benchmarks
-
-https://benchmark.cppalliance.org/
-
-Continued from the previous month, updated JSON benchmarks scripts to use a consistent output file location and revised jamfile.
-
-### Boostorg website cppalliance/temp-site
-
-Added prometheus and nagios monitoring alerts. Checking on status of db backup scripts. Upgraded CircleCI so release-tools will deploy to AWS S3, and temp-site will publish develop/master snapshots. Slack chat with Greg about library documentation. Test/debug slow load times of library docs on the site and reported findings. Discussions with Spencer about how URL maps ought to work. Sent Glen/Marshall info about deploying new boost releases on preview.boost.org. Ran a sync from prod to stage, including database and S3 files, so that stage looks like production. Added Frank and Lacie in GCP. On the topic of quickbook for release notes: probably migrate to asciidocs. Created docs at https://github.com/cppalliance/temp-site-documentation. Deploy more domain names for testing. boost.io. Sent Calendar API info to Lacie. Wowbagger: cron scripts to backup files.
-
-### Load testing
-
-Investigated https://github.com/locustio/locust/ and Bees with Machine Guns. Installed both tools. Sent a PR (merged) to locustio improving their terraform script. After a couple days, for the sake of time, concluded we can solve 'load testing' problems by installing a CDN in front of the website, thus removing most traffic. Switch to that goal.
-
-### CDN Fastly
-
-Set up a CDN front-end to the temp-site at Fastly. Extensive testing, many iterations of VCL. Added SSL certificates. Opened a case to discuss the existing conflict/overlap when acme-challenge is used on both the backend cluster, and the CDN. They are planning to implement a new RFC in the next year to improve the situation. The same hostname should be applied on all servers so that social auth works. Updated kubernetes from "Ingress" to "Gateway API", to improve SSL requests. Deployed "Gateway" in each environment.
-
-### boostorg/release-tools
-
-Assisted Alan, who is adding Antora support to boost releases. Generated docker images with additional nodejs packages. Debug/test the main release scripts, which were modified.
-Added packages to the images for Klemens. Added CI, code formatter 'black', similar to website. Updated boostorg/boost to use the new images.
-
-### Mailman project
-
-Setting up test instances of mailman2 and mailman3, to test and document mailman users, members, passwords, especially after an upgrade/migration. Wrote an improved documentation section for mailman-suite (merged) at https://gitlab.com/mailman/mailman-suite-doc. Various updates to cppalliance/ansible-mailman3 codebase. Meetings with Boost Foundation about the mailing lists. Install ElasticSearch on all instances. Added kube variables in temp-site, pointing to mm instances. Mailman cli test.
-
-### Self-hosted runners
-
-Analyzing the large codebase at philips-labs/terraform-aws-github-runner (tagr). Sent them yet another bugfix. Also, there is an outstanding issue (they still have not implemented) whereby the default runner labels can be entirely replaced/customized, and if that is done, tagr could be rolled out to more repositories with less risk of unforeseen conflicts caused by label matching in the future. Ongoing. Installed LLVM on windows-2022 image.
-
-### Drone
-
-Upgraded the Drone executable itself. New dark mode support! That's the last commit from Drone which is being transmogrified into "https://github.com/harness/gitness". When gitness is eventually ready it may become a drop-in replacement, but that is not yet certain.
-
-As requested by Alexander, install the latest "macOS 14 Sonoma" machines. Worked with boostorg/math to retire the oldest "macOS 10.13 High Sierra" that are now offline, and use 14. New drone 23.10 image for Peter. Sent a PR to boostorg/url to centralize the drone 'generate' function at cppalliance/ci-automation.
-
diff --git a/_posts/2024-01-11-PeterTurcan-Q4-2023.md b/_posts/2024-01-11-PeterTurcan-Q4-2023.md
deleted file mode 100644
index 7845368db..000000000
--- a/_posts/2024-01-11-PeterTurcan-Q4-2023.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: peter
-title: Peter Turcan Q4 2023 Status
-author-id: peter
-author-name: Peter Turcan
----
-
-* Wrote a section on the Super-project, based on some legacy Wiki pages that needed considerable updating, and the boostdep tool steps as this covered installing the necessary components. Covered four main topics: the Super-project layout, Getting Started, Library Maintenance and Library Workflow.
-
-* Wrote a Best-Practices section on Beneficial Dependencies - the dependencies used by the most libraries of Boost. These include Boost.Config, Boost.Core, Boost.Assert, Boost.StaticAssert, Boost.ThrowException, and for metaprogramming Boost.MP11.
-
-* Added a section to the Boost History topic covering the BoostCon/C++ Now conference. The location of the conference in Aspen, Colorado comes with some colorful twists: "Mild-mannered black bears live in the area. Please close doors behind you in the evenings."
-
-* Updated the Documentation and Requirements sections with some good information for organization requirements, library metadata and documentation guidelines.
-
-* Added a Site-docs Style Guide, based on an existing style guide that was outside of the documentation, covering some basic color, wording, URL and image guidelines.
-
-* Improved some navigation linking issues.
-
-* Improved some headline text, bringing the headlines into line with our standards.
-
-* Provided some detailed blog post feedback.
-
-* Working on a number of topics that have yet to be completed, including the B2 and CMake build systems, the developers FAQ, the Boost Test Matrix, Advanced Builds, and others.
\ No newline at end of file
diff --git a/_posts/2024-01-12-AlanQ4Update.md b/_posts/2024-01-12-AlanQ4Update.md
deleted file mode 100644
index 52337a11e..000000000
--- a/_posts/2024-01-12-AlanQ4Update.md
+++ /dev/null
@@ -1,177 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: alan
-title: Alan's Q4 Update 2023
-author-id: alan
----
-
-# Summary
-
-- [MrDocs](#mrdocs)
-- [Handlebars](#handlebars)
-- [Boost Libraries](#boost-libraries)
-- [Boost Release Tools](#boost-release-tools)
-- [Boost Website](#boost-website)
-- [C++ Github Actions](#c-github-actions)
-
-## MrDocs
-
-Over the last quarter, we continued to work intensely on [MrDocs](https://github.com/cppalliance/mrdocs), a
-documentation generator for C++ projects. I've been overseeing and reviewing all the work done by the other contributors
-in the project. Besides the work done by Krystian, Fernando also joined the project as a contributor.
-
-We've been working on making the project at least as competitive as Doxygen + Docca, so we can start using it in a
-subset of Boost libraries. In this process, we have also been improving usability and documentation, considering
-feedback we received.
-
-In particular, work in Q4 included:
-
-- Improvements to the documentation, both in javadocs and exposition
-- Reducing technical debt, including extending APIs and problems with handlebars templates
-- Updating CI workflows in coordination with the [C++ Github Actions](#c-github-actions) project
-- Usability improvements, including tools relative to installed executable and many new features for templates
-- Fixes and improvements to installation procedure
-- Features such as symbols with links to repositories and many new sections in templates
-- Auxiliary utilities, such as a complete implementation of `std::expected` for the project
-- Generating binaries for the new LLVM version and updating the documentation
-- Improvements to build scripts with CMake presets and vcpkg manifest files
-
-In parallel, Boost.URL has integrated MrDocs, and is temporarily generating the documentation with both
-Doxygen+Docca and Antora+MrDocs. The documentation includes an extension to collect reference pages
-generated by MrDocs and include them in the Antora documentation. A preview of the documentation
-generated by MrDocs is available at https://792.urlantora.prtest2.cppalliance.org/site/url/index.html.
-
-In general, I've been responsible for:
-
-- setting up and maintaining CI for the project;
-- MrDocs and LLVM release binaries;
-- build scripts;
-- setting up and integrating dependencies;
-- setting up and deploying the Antora toolchains and documentation to the project website;
-- working on supporting libraries; and
-- supervising and reviewing the work done by other contributors (Krystian and Fernando);
-- fixing bugs.
-
-## Handlebars
-
-MrDocs includes a support library that reimplements the [Handlebars](https://handlebarsjs.com/) template engine in C++.
-This module is used to generate documentation from templates. This continued to be the MrDocs support
-library in which I have been investing most of my time.
-
-MrDocs also includes a support library called "DOM" that provides a C++ interface to type-erased property trees, such as
-JSON and Javascript Objects. This module is used by MrDocs to create opaque representations of property trees that can
-be used by the Handlebars engine and other library functions. Such representations can come from a variety of sources,
-including JSON files, Javascript objects, and internal C++ objects with information parsed by MrDocs.
-
-In Q4, a number of improvements relevant to MrDocs were made to the Handlebars and DOM libraries. The main
-feature is the addition of complete bindings for Javascript objects, arrays, and functions. The C++/JavaScript
-bindings work both ways, so that C++ objects can be used in Javascript and Javascript objects can be used in C++.
-Unit tests for the DOM were also included to complement the Handlebars unit tests.
-
-These changes enabled the original implementation of handlebars.js to be completely removed from the project.
-
-## Boost Libraries
-
-As in other quarters, the Boost Library in which I have been investing most of my time
-is [Boost.URL](https://github.com/boostorg/url). The library is in maintenance mode since our focus shifted to MrDocs,
-but there is a constant demand for work fixing bugs and improving the documentation.
-
-In Q4, Boost.URL has integrated MrDocs, and is temporarily generating the documentation with both
-Doxygen+Docca and Antora+MrDocs:
-
-- All the content in the QuickBook documentation has been rewritten in asciidoc format. Alternatives have been
- developed to replace special QuickBook features, such as snippets.
-- The Boost.URL repository now includes an extension to automatically find or
- download MrDocs and use it to collect reference pages generated by MrDocs. The reference pages are aggregates
- as logical files in the Antora system, which includes them in the Antora documentation.
-
-A preview of the documentation generated by MrDocs is available
-at https://792.urlantora.prtest2.cppalliance.org/site/url/index.html.
-
-Besides the integration with MrDocs, work in Q4 included:
-
-- Improvements, simplifications, and a number of extensions to CI
-- Fixed previous design choices that were incompatible with the super-project
-- Fixes to build scripts to support for more compilers
-- Extended integration tests to cover all cases described by the super-project CMake scripts
-- Fixes to algorithms, such as the `parse_query` and `format` functions
-- GDB pretty printers are now provided
-
-Many improvements had to be coordinated with the [C++ Github Actions](#c-github-actions) project, which had new
-features implemented for these use cases.
-
-In general, I've been responsible for:
-
-- upgrading CI, mostly coordinating with the [C++ Github Actions](#c-github-actions);
-- maintaining, simplifying, and updating build scripts;
-- integrating more spec tests, such as the Ada tests included more recently;
-- including more examples, such as the more recent sanitize-URL example;
-- fixing documentation content that is out of date; and
-- fixing bugs.
-
-Besides Boost.URL, as usual, I've been overseeing and fixing smaller issues with other boost libraries. In Q4,
-only some small work in Boost.StaticString was necessary.
-
-## Boost Release Tools
-
-Over the last quarter, I've been working on the integration of toolchains I developed into the Boost Release Tools to
-add support for features desired for the new website. In Q3, I previously included Antora support in the release tools
-and this has already been deployed in the official Boost release 1.84.0.
-
-In Q4, I opened a pull request to include support for archive variants in the release tools. This pull request is
-currently under review and will be considered for inclusion in a period between Boost releases when priority
-moves from the [Boost website](#boost-website).
-
-## Boost Website
-
-Among the many support projects for the new Boost website, I've been helping the most on
-[`cppalliance/site-docs`](https://github.com/cppalliance/site-docs), which includes the Boost website documentation as
-an Antora project.
-Its components represent the "User Guide", "Contributor Guide", and "Formal Review" sections of the website.
-
-Since the inception of the project, I've been overseeing and reviewing all the work done by the other contributors to
-the project.
-
-In general, I continue to be responsible for:
-
-- reviewing and merging all pull requests to the project;
-- setting up and maintaining CI for the project;
-- coordinating with the website project on content uploaded to AWS buckets;
-- build scripts to be reused by the release tools and previews;
-- writing sections of the documentation that require technical knowledge;
-- developing custom Boost/Antora extensions, such as the Boost Macro extension;
-- maintaining the Antora toolchain and templates; and
-- adjusted Boost libraries to match formats expected by the website.
-
-## C++ Github Actions
-
-[C++ Github Actions](https://github.com/alandefreitas/cpp-actions) is a project I created and have been maintaining
-since Q3. It is a collection of composable, independent, and reusable Github Actions for any C++ project that needs to
-be tested on a variety of compilers and environments.
-
-Both MrDocs and Boost.URL are currently using these actions in their CI. Boost.Http is now also using these actions
-in its CI, which gave us the opportunity to get more feedback and improve the actions.
-
-The project includes actions to:
-
-- Generate a Github Actions Matrix for C++ projects;
-- Setup C++ compilers;
-- Install and setup packages;
-- Clone Boost modules;
-- Run complete CMake and `b2` workflows;
-- Generate changelogs from conventional commits;
-- Generate summaries; and
-- Generate time-trace reports and flame graphs
-
-In particular, a number of new features have been added to the project in Q4.
-
-- The matrix has been adapted to include multiple extra flags and values over time.
-- Actions that generate summaries included improvements and new sections.
-- Actions can now identify many new types of warnings that should be emitted.
-- CMake and B2 workflows included more options and features.
-- The matrix now supports composite factors, combinatorial factors, and custom containers.
-- Documentation has been improved to also use the collector Antora extension.
-
-Besides new features, most work went into fixing issues that were revealed by testing the actions in new environments.
-About ~50 bugs with various levels of severity have been fixed in Q4.
diff --git a/_posts/2024-01-12-KrystianQ4Update.md b/_posts/2024-01-12-KrystianQ4Update.md
deleted file mode 100644
index ee88f0b5c..000000000
--- a/_posts/2024-01-12-KrystianQ4Update.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: krystian
-title: Krystian's Q4 Update
-author-id: krystian
----
-
-Much like the last, my primary focus this quarter was on MrDocs, with some additional work done on Boost.StaticString and clang.
-
-## MrDocs
-
-The last significant feature MrDocs needed in order to produce documentation on par with Docca was the ability to render overload sets. At a glance this may appear trivial, but MrDocs was designed to render documentation on a per-declaration basis: each declaration would result in a single documentation section being emitted by a generator. This is problematic for overload sets, as overload sets are not declarations. I ended up implementing them as a "view" of the lookup table for a particular scope.
-
-Besides implementing support for rendering overload sets, I further expanded the kinds of declarations supported by MrDocs to include friend declarations, deduction guides, and enumerators. Previously, enumerators were stored as a non-`Info` type, meaning they could not be found by the name lookup mechanism when referenced with `@ref`/`@copydoc`. Adding support for friend declarations also had its own set of challenges due to a lack of support by the clang USR generator. As an interim solution, I'm generating pseudo-USRs for friends by concatenating the USR of their lexical scope with the USR of the referenced declaration or type. While this is sufficient for now, it will ultimately be necessary to submit patches to clang to fix the plethora of bugs in USR generation, as well as add support for newer C++ features such as constraints.
-
-Another problem area I addressed was the representation of qualified names for symbols that were not extracted. Previously, three different kinds of `TypeInfo` were used to represent "terminal" types (i.e. typenames): `BuiltinTypeInfo` for builtin types, `TagTypeInfo` for class and enumeration types, and `SpecializationTypeInfo` for class template specializations. These types were awkward to work with, required a non-trivial amount of boilerplate, and were incapable of representing a typename that was qualified by a nested-name-specifier that named a symbol that was not extracted. To remedy this, I created a `SymbolName` type that can represent a qualified-id naming any symbol and replaced the three terminal `TypeInfo` kinds with `NamedTypeInfo`.
-
-## Clang
-
-On the clang side of things, I continued work on fixing C++ conformance issues. This included diagnosing friend function specialization definitions (e.g. `friend void f() { }`), diagnosing unexpanded packs in function template explicit specializations (e.g. `template<> void f();` where `Ts` is a pack), and improving diagnostics for unexpanded packs in class/variable template partial/explicit specializations.
-
-In terms of in-progress patches, I am currently working on a patch that will significantly improve dependent name lookup -- both in terms of conformance and diagnostics. Currently, even obviously ill-formed constructs such as:
-```cpp
-template
-struct A
-{
- auto f()
- {
- return this->x;
- }
-};
-```
-are not diagnosed until the template is instantiated. Although this behavior is conforming, in less contrived scenarios, it would be far better to avoid an avalanche of diagnostic messages by diagnosing this at the point of definition. This is possible primarily due to [[temp.dep.type] p6](http://eel.is/c++draft/temp.dep.type#6):
-
-> If, for a given set of template arguments, a specialization of a template is instantiated that refers to a member of the current instantiation with a qualified name, the name is looked up in the template instantiation context.
-If the result of this lookup differs from the result of name lookup in the template definition context, name lookup is ambiguous.
-
-and [[temp.dep.type] p5](http://eel.is/c++draft/temp.dep.type#5):
-
-> A qualified name is dependent if
-> - [...]
-> - its lookup context is the current instantiation and has at least one dependent base class, and qualified name lookup for the name finds nothing
-
-This guarantees that, within the definition of a template, if the
-lookup context of a qualified name is that template:
-- if lookup finds any member of the template, then the result of lookup in the instantiation context must also find that member, or
-- if lookup finds nothing, then the program is ill-formed unless the name is found in a dependent base class.
diff --git a/_posts/2024-01-12-dmitrys-q4-update.md b/_posts/2024-01-12-dmitrys-q4-update.md
deleted file mode 100644
index 66adb178a..000000000
--- a/_posts/2024-01-12-dmitrys-q4-update.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: dmitry
-title: Dmitry's Q4 2023 Update
-author-id: dmitry
-author-name: Dmitry Arkhipov
----
-
-In the fourth quarter of 2023 the functionality for direct parsing in JSON was
-finalised and the code was merged into the mainline and then released in
-Boost 1.84.0. Thus, I've moved to the natural opposite of direct parsing:
-direct serialisation. Boost.JSON's serialisation is less customisable than
-parsing, since the demand for custom serialisation is significantly lower. As
-a result, the design of the serialiser is quite different from that of the
-parser, and hence a different approach had to be taken to implement direct
-serialisation. That approach, in my opinion, has a big benefit for the user:
-there's no need for a dedicated direct serializer type, it can be done with
-the regular `boost::json::serializer`. On the other hand, it presents a
-different challenge: making changes to `serializer` in a way that does not
-negatively affect its performance too much.
-
-This fight for performance has occupied most of my time in the last quarter.
-And it also provided me an opportunity to experiment with different potential
-optimisations to the serializer. I would also like to comment that different
-C++ implementations sometimes have directly opposite views on what's better or
-worse for performance, which poses quite a conundrum in such line of work. And
-finally, this work was greatly influenced by the availability of continuous
-integration infrastructure set up by the C++ Alliance, and automatic
-benchmarking in particular.
-
-Another positive effect of C++ Alliance's CI is due to coverage reporting.
-As I was striving to never decrease the rate of code coverage, I've discovered
-code in the serializer that used to perform a function, but has since become
-unnecessary due to refactoring.
-
-Overall the work on serializer has vastly increased my understanding of how
-JSON's serializer works under the hood. I plan to finish the feature of direct
-serialization before the next Boost release.
diff --git a/_posts/2024-04-05-SamsQ1Update.md b/_posts/2024-04-05-SamsQ1Update.md
deleted file mode 100644
index 28d85b4b1..000000000
--- a/_posts/2024-04-05-SamsQ1Update.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: sam
-title: Sam's Q1 2024 Update
-author-id: sam
----
-
-Here's an overview of some projects I have been working on the last few months.
-
-### Boost Downloads
-
-Set up an AWS Cloudfront CDN at archives.boost.io to host the Boost releases during an outage. JFrog is planning to cancel the account, and the outage was an unintentional (or intentional) warning. Extensive experimentation using a Fastly CDN in conjunction with JFrog, however they are sending S3 redirects which prevent this configuration. Installed another origin server and a load balancer. Debugged nginx. Added TLS. archives.boost.io has now been migrated from AWS Cloudfront to Fastly, with our own origin servers being the source rather than JFrog. 50TB of traffic each month even when not advertised on boost.org. The download traffic is mostly from the React open source project.
-
-### Boost website boostorg/website-v2
-
-Configured and tested TLS certificates in GCP. Increased cpu/mem on production. Composed a runbook for go-live steps of the boost website. Worked on automating this as much as possible, for example, a sticking point was that Social Auth (google, github) was locked to only one DNS domain. It would not respond on preview and www simultaneously. Solving that took many detours into other components, the Django "sites" framework and the allauth codebase. Worked with Lacey on a number of github issues, the main one being the problem that previous versions of boost libraries use inconsistent documentation paths. Each exception should be handled individually. Developed comprehensive script to sync boost.io production to staging (database + S3 files). Researching postgres backup/restore topics. Migrated preview.boost.org to www.boost.io. Some issues with transition to boostorg github. Solved trailing-slash bug: website links are missing the final slash character needed for a directory.
-
-Created a static html mirror of boost.org: boost.org.cpp.al.
-
-### boostorg/release-tools
-
-Sent a PR to boostorg/boost with an updated release docker image. Refactored multiple scripts to be able to support Github Releases if that option is needed in the future. Consider options to reduce the size of the releases, such as a source-only distribution. Still in the planning stages with Alan, not done. Discussion about extra Antora files in the release artifact. Developed new scripts so published releases will sync to the new CDN. Created S3 bucket boost-archives, copying files from JFrog.
-
-Debugging boostorg/cobalt docs build, sent image files to Klemens.
-
-### Mailman project
-
-Set up new test mailman servers for each environment (production, stage, etc). VMs, mailgun account, DNS, databases. Imported previous archives. Updated Ansible role to support Elasticsearch. Opened tickets with Hyperkitty about search results, display formats. Refactored ansible-mailman settings.py file to use an .env file instead of ansible variables, so it can integrate more easily with external dev environments. Created another github repo to collaborate on mailman. Checking on mailman cronjobs. Upgraded Hyperkitty.
-
-### wowbagger
-
-Early in January, engaged in discussions with the Boost Foundation about the condition of wowbagger, its relevance, list of services, issues in upgrading it.
-
-### Jenkins
-
-Reconfigure certain jobs to use their own node_modules directories, not a shared directory.
-
-### Drone
-
-Investigated and solved FreeBSD b2 crash. Deleted gcc symlink, only gcc11 remains. Advising tzlaine/parser about Drone integration.
-
diff --git a/_posts/2024-04-19-PeterTurcan-Q1-2024.md b/_posts/2024-04-19-PeterTurcan-Q1-2024.md
deleted file mode 100644
index 227c8e070..000000000
--- a/_posts/2024-04-19-PeterTurcan-Q1-2024.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: peter
-title: Peter Turcan Q1 2024 Status
-author-id: peter
-author-name: Peter Turcan
----
-
-- Added a Contributors FAQ to the Contributor Guide, then updated it with a series of questions and answers on Security - based on what I thought many new developers might ask about Security. Added a Licensing section covering questions on the BSL, migrating and updating a legacy topic. This FAQ currently has 35 questions and answers.
-
-- Reviewed and provided detailed feedback on the documentation for the candidate Boost Parsing library.
-
-- Migrated an In-memoriam topic for Beman Dawes.
-
-- In the User Guide section added a Boost Discussion Policy page, migrating and updating a legacy topic.
-
-- Added a Modular Boost section to both the User Guide FAQ and Contributor Guide FAQ, with questions and answers particular to the audience. The User Guide FAQ currently has 57 questions and answers.
-
-- Wrote a Test Matrix topic, migrating and updating a legacy topic.
-
-- Based on feedback - always welcome - updated the User Guide and Contributor Guide with more appropriate links and improved information.
-
-- In the Formal Reviews section added Managing a Review and Writing a Review topics, migrating and updating legacy topics. Currently working on more updates to the Formal Reviews section.
diff --git a/_posts/2024-04-19-RubenQ1.md b/_posts/2024-04-19-RubenQ1.md
deleted file mode 100644
index a42a73bad..000000000
--- a/_posts/2024-04-19-RubenQ1.md
+++ /dev/null
@@ -1,111 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: q1_update
-title: "Ruben's Q1 2024 Update"
-author-id: ruben
-author-name: Rubén Pérez Hidalgo
----
-
-## Client-side SQL formatting
-
-A new grand feature has arrived to Boost.MySQL's city: [client-side SQL formatting](https://www.boost.org/doc/libs/master/libs/mysql/doc/html/mysql/sql_formatting.html).
-
-If you've worked with MySQL C API before, you may be familiar with [`mysql_real_escape_string`](https://dev.mysql.com/doc/c-api/8.0/en/mysql-real-escape-string.html): a function that takes a string and escapes it, enabling the user to compose queries dynamically without the risk of running into SQL injection vulnerabilities. We didn't have a matching function in Boost.MySQL... until now.
-
-`mysql_real_escape_string` is a pretty low-level construct. While we have an equivalent function, I also wanted to build higher-level functionality to allow composing dynamic SQL queries in a simple way.
-
-Let's say you want to insert a bunch of employee records into a table. Until now, the only way to go was to use prepared statements, like this:
-
-```
-asio::awaitable insert_employees(
- mysql::any_connection& conn,
- span employees
-)
-{
- assert(!employees.empty());
-
- // Prepare a statement
- auto stmt = co_await conn.async_prepare_statement("INSERT INTO employee (name, company) VALUES (?, ?)", asio::deferred);
-
- // Execute it as many times as records we want to insert.
- // Note that this performs a round-trip to the server for each record
- for (const auto& emp: employees)
- {
- co_await conn.async_execute(stmt.bind(emp.name, emp.company), asio::deferred);
- }
-}
-```
-
-This can be pretty inefficient, as we're inserting records one by one. If you're experienced with SQL, you may be thinking: "why don't you batch the inserts in a single query"? We can't do this with prepared statements. But if we had a way to securely compose a query client-side... Fortunately, we now have this!
-
-```
-asio::awaitable insert_employees(
- mysql::any_connection& conn,
- span employees
-)
-{
- assert(!employees.empty());
-
- // Compose a query string client-side, using fmtlib-like format strings.
- // Formatting will take care of escaping your string values
- mysql::format_context ctx(opts);
- ctx.append_raw("INSERT INTO employee (name, company) VALUES ");
- bool is_first = true;
- for (const auto& emp : employees)
- {
- // Comma separator
- if (!is_first) ctx.append_raw(", ");
- is_first = false;
-
- // Actual record
- mysql::format_sql_to(ctx, "({}, {})", emp.name, emp.company);
- }
- std::string query = std::move(ctx).get().value();
-
- co_await conn.async_execute(query, asio::use_awaitable);
-}
-```
-
-And if you're issuing simple queries, you may be able to avoid prepared statements altogether:
-
-```
-asio::awaitable get_employee_by_id(
- mysql::any_connection& conn,
- std::int64_t id
-)
-{
- mysql::static_results r;
- co_await conn.async_execute(
- mysql::format_sql(conn.format_opts().value(), "SELECT * FROM employee WHERE id = {}", id),
- asio::deferred
- );
- co_return std::move(r.rows().at(0));
-}
-```
-
-This feature enables lots of other complex use cases, like dynamic filters, patch-like updates and pipelining.
-
-## Boost.PFR integration and maintenance work
-
-Boost.PFR users may be happy to know that `static_results` and friends now support PFR types! This will become available in Boost 1.86.
-
-Additionally, I've been doing some extra maintenance work to attempt to deliver a Boost 1.85 as high-quality as possible.
-
-## Boost.Charconv
-
-Many of you may have heard that Boost.Charconv, by Matt Borland, is now a proud member of Boost! Charconv is a high-quality polyfill library that provides functionality like `std::to_chars` and `std::from_chars` in C++11. I desperately needed this for Boost.MySQL, as text queries parse ints and doubles, and client-side formatting serializes them. Locale-independence is particularly important for the latter, as `SELECT 4.2` is valid SQL, while `SELECT 4,2` may open the door to a SQL injection vulnerability. Not fun.
-
-I feel that just reading the documentation and playing with a library is sometimes not enough to emit a useful review. I thought I could do better this time, and took a different approach to the review: I'd try to use the library in Boost.MySQL and report my experience.
-
-I'm pretty satisfied with the results, as focusing on a real use case avoids bike-shedding and provides good feedback. As Boost.MySQL has an extensive test suite, rewriting part of it makes the library build and run under many different scenarios, usually uncovering subtle integration issues (e.g. with CMake files). I've been very happy with the quality of the library and the author's response to my feedback, and I've decided to be an early adopter - Boost.MySQL 1.85 uses Charconv extensively.
-
-## Boost.Parser
-
-Since the new review approach had worked great, I decided to repeat it for Boost.Parser. I rewrote the client-side SQL formatting feature, which parses format strings, using Boost.Parser. Again, this ended up uncovering a couple of issues.
-
-I can't use Boost.Parser in MySQL because it requires C++17 (and is an overshot, considering my simple use case). But I feel the review was helpful for Boost's overall quality.
-
-## Other contributions
-
-I've also helped Boost.Redis' author to set up part of his integration testing CI, following what I learnt setting up Boost.MySQL CIs. I've also refactored MySQL's build scripts to be more compliant with Boost best-practices, which has provided me some exposure to Boost's internal machinery.
diff --git a/_posts/2024-04-20-ChristiansQ1Update.md b/_posts/2024-04-20-ChristiansQ1Update.md
deleted file mode 100644
index b02f1abb4..000000000
--- a/_posts/2024-04-20-ChristiansQ1Update.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: christian
-title: Christian's Q1 2024 Update
-author-id: christian
-author-name: Christian Mazakas
----
-
-In the past few months, I've been busy getting up to speed on the slate of proposed-for-Boost libraries:
-Boost.Buffers and Boost.Http.Proto.
-
-The first real task was getting the CI setup for that and we chose to use an in-house solution. I was never really an
-expert in GitHub Actions nor CI so there was a definite learning curve in how to debug CI issues and what's an effective
-use of time versus not.
-
-But more importantly I was able to discern what was and what wasn't important to automate in the CI pipeline. The actual
-actions you can define in GHA are incredibly powerful and as someone who's well-versed in JavaScript, I find them quite
-nice and easy to use.
-
-The in-house tools we use are called cpp-actions, available [here](https://github.com/alandefreitas/cpp-actions) if
-one is interested or wants to try them out. I'd say they're all good except for the one that generates the actual
-build matrix. This is something I learned via trial by fire.
-
-The gist of actions is that they're essentially functions with inputs and outputs. An action that generates your inputs
-for you such as compiler and compiler flags inverts the benefits and turns actions into a burden instead of a boon. This
-is because not every project will aim to support the same compilers and the same flags. Some entries of the build matrix
-will have even more special requirements than others so being able to edit the matrix and add flags or other configuration
-options becomes paramount.
-
-The action can attempt to accommodate these use-cases but the resulting syntax is likely to be poor and more trouble than
-it's worth. I noted a stark improvement in my CI experience once I was fully in control of the inputs and simply treated
-actions like an opaque functional pipeline. The ability to just add a new field to a matrix entry was a huge convenience
-in comparison to the process it took to update the matrix-building action which requires updating a separate repo and then
-a SHA in the repo actually being tested.
-
-But CI is worth it. We introduce all this complexity in our development pipelines because taking some C++ and compiling
-it on as many platforms as we can is the best way to root out undefined behavior and other bugs. I found GitHub Actions
-to be an enjoyable experience but there are caveats. Most Boost libraries use ad hoc CI configuration so I laud the work
-that's been done to abstract all this. The wisdom here is that not all forms of automation are free and there's
-different requirements and costs to be considered. Sometimes, rote copy-paste with inline editing just wins out and it's
-only through experience that we get a sense of when this is the case.
-
-As for buffer operations and HTTP, they're still being actively developed and worked on. Good progress has been made on
-that front. The container types and serialization routines have received a lot of love and are that much closer to being
-fully production-ready. CI took a long time to understand and setup so the next quarter will be more focused on actual
-C++ and the pedantry of HTTP RFCs.
diff --git a/_posts/2024-04-20-Joaquins2024Q1Update.md b/_posts/2024-04-20-Joaquins2024Q1Update.md
deleted file mode 100644
index 15d007a2c..000000000
--- a/_posts/2024-04-20-Joaquins2024Q1Update.md
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: joaquin
-title: Joaquín's Q1 2024 Update
-author-id: joaquin
-author-name: Joaquín M López Muñoz
----
-
-During Q1 2024, I've been working in the following areas:
-
-### Boost.Unordered
-
-* Reviewed Braden's work on optimization of `emplace(k, v)` calls ([PR#230](https://github.com/boostorg/unordered/pull/230),
-released in [Boost 1.85.0](https://www.boost.org/doc/libs/1_85_0/libs/unordered/doc/html/unordered.html#changes_release_1_85_0)).
-With this optimization, statements such as:
-
m.emplace(0,"zero");
-won't create a temporary `(0, "zero")` value if the element with key 0 already exists. This is particularly relevant
-when dynamic memory allocation is involved (for instance, if `mapped_type` is `std::string` in the example above).
-The implementation of this feature is surprisingly tricky and Braden has done a superb job at coming up with an elegant and concise formulation.
-* Fixed support for allocators with explicit copy constructors ([PR#234](https://github.com/boostorg/unordered/pull/234),
-released in [Boost 1.85.0](https://www.boost.org/doc/libs/1_85_0/libs/unordered/doc/html/unordered.html#changes_release_1_85_0)).
-* Fixed bug in the const version of `unordered_multimap::find(k, hash, eq)` ([PR#238](https://github.com/boostorg/unordered/pull/238),
-released in [Boost 1.85.0](https://www.boost.org/doc/libs/1_85_0/libs/unordered/doc/html/unordered.html#changes_release_1_85_0)).
-* Reviewed Braden's work on addition of `boost::unordered::pmr aliases` for Boost.Unordered containers using
-`std::pmr::polymorphic_allocator` ([PR#239](https://github.com/boostorg/unordered/pull/239), to be released in Boost 1.86.0).
-* I've continued working on learning about advanced concurrency techniques implemented by
-[ParlayHash](https://github.com/cmuparlay/parlayhash) with a view towards leveraging them for the
-improvement of `boost::concurrent_flat_map` in massively parallel scenarios. Latest research has
-focused on implementing (emulated) [load-link/store-conditional techniques](https://github.com/boostorg/unordered/compare/687a446784da8592f8795f1068328e9de041f63b...a4a5a3e12790df7236f1e38b3ec29cdc0463b6cc),
-but results are still well below those achieved by ParlayHash. Progress is hampered by the need to
-access a many-core machine for benchmarking, which slows down turnaround times.
-* On April 25, I'm giving a talk on perfect hashing at [using std::cpp 2024](https://eventos.uc3m.es/105614/programme/using-std-cpp-2024.html).
-I've been preparing the presentation and associated material, which will be made public shortly after the talk.
-
-### Boost.Bimap
-
-* Fixed heterogeneous lookup for side collections ([PR#42](https://github.com/boostorg/bimap/pull/42),
-released in [Boost 1.85.0](https://www.boost.org/doc/libs/1_85_0/libs/bimap/doc/html/boost_bimap/release_notes.html#boost_bimap.release_notes.boost_1_85_release)).
-Boost.Bimap is in need of a maintainer, if you'd like to take over this role please step in!
-
-### Boost promotion and new website
-
-As of lately, I'm devoting more time to Boost-related tasks outside of actual programming:
-
-* I've authored several promotional tweets such as [this](https://x.com/Boost_Libraries/status/1750559787220099577),
-[this](https://x.com/Boost_Libraries/status/1755277784824344943) and
-[this](https://x.com/Boost_Libraries/status/1768833941341896756): the art was commissioned to
-the amazing [Bob Ostrom](https://www.bobostrom.com/).
-* Starting in late March, I'm managing the project to complete the proposed [new website for Boost](https://www.boost.io/).
-We should be ready for launch early in Q2 2024. If you're curious, you can take a look at
-the project backlogs [here](https://github.com/boostorg/website-v2/issues) and [here](https://github.com/boostorg/website-v2-docs/issues).
-
-### A new golden era for Boost?
-
-I've been a contributor and keen observer of Boost for more than 20 years, back from the day when
-the project was spearheading the community initiatives to provide high-quality libraries for
-eventual standardization. After crucially contributing to the watershed upgrade of the
-standard library in C++11, Boost popularity waned, partially because of its success
-in getting many of its key components standardized, but also due to external and internal
-factors (appearance of excellent, lighter alternatives to some of its libraries,
-lack of modernization, monolithicity, etc.) In the last couple of years or so, however,
-I've noticed a resurgence in the interest for Boost: I can't provide hard data yet (I
-will eventually), but I'm following some proxy signs (presence and feedback on social media,
-mainly) that may indicate we're back on track towards better serving the C++ community. Some concrete
-initiatives that I think are helping improve the public perception of the project are:
-* Better package management support from Conan and vcpkg.
-* Modularization efforts both with [CMake](https://github.com/boostorg/cmake) and [B2](https://github.com/grafikrobot/boostorg.boost).
-* Deprecation of C++03 support by many Boost libraries.
-* Ongoing work and conversations around the proposed new website and potential [module support](https://anarthal.github.io/cppblog/modules).
-
-I'm no postmodernist, but I recognize the power of narratives and good PR strategies
-even in the supposedly objective world of software development. Backed by its diverse
-catalog of high-quality libraries and resources,
-I'd like to contribute what little I can to communicating the current and future benefits of
-Boost to the C++ community.
diff --git a/_posts/2024-04-22-KrystiansQ1Update.md b/_posts/2024-04-22-KrystiansQ1Update.md
deleted file mode 100644
index f2a279d1f..000000000
--- a/_posts/2024-04-22-KrystiansQ1Update.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: krystian
-title: Krystian's Q1 2024 Update
-author-id: krystian
----
-
-My primary focus this quarter was on clang -- in particular, C++ standards conformance (including implementing resolutions to defect reports I've submitted), bug fixes, and various refactoring of the AST. I was also granted write access to the LLVM project repository this quarter, which has allowed me to significantly increase my output. To that end, I submitted/merged a number of patches:
-
-- #89618 [[Clang][Sema] Remove unused function after #88731](https://github.com/llvm/llvm-project/pull/89618)
-- #89605 [[libc++][NFC] Fix unparenthesized comma expression in mem-initializer](https://github.com/llvm/llvm-project/pull/89605)
-- #89377 [[libc++] Fix usage of 'exclude_from_explicit_instantiation' attribute on local class members](https://github.com/llvm/llvm-project/pull/89377)
-- #89300 [[Clang][Sema] Diagnose explicit specializations with object parameters that do not match their primary template](https://github.com/llvm/llvm-project/pull/89300)
-- #88974 [[Clang][Parse] Diagnose requires expressions with explicit object parameters](https://github.com/llvm/llvm-project/pull/88974)
-- #88963 [[Clang][Sema] Improve support for explicit specializations of constrained member functions & member function templates](https://github.com/llvm/llvm-project/pull/88963)
-- #88777 [[Clang][Sema] Warn when 'exclude_from_explicit_instantiation' attribute is used on local classes and members thereof](https://github.com/llvm/llvm-project/pull/88777)
-- #88731 [Reapply "[Clang][Sema] Fix crash when 'this' is used in a dependent class scope function template specialization that instantiates to a static member function (#87541, #88311)"](https://github.com/llvm/llvm-project/pull/88731)
-- #88417 [[lldb] Fix call to TemplateTemplateParmDecl::Create after #88139](https://github.com/llvm/llvm-project/pull/88417)
-- #88311 [Reapply "[Clang][Sema] Fix crash when 'this' is used in a dependent class scope function template specialization that instantiates to a static member function (#87541)"](https://github.com/llvm/llvm-project/pull/88311)
-- #88264 [Revert "[Clang][Sema] Fix crash when 'this' is used in a dependent class scope function template specialization that instantiates to a static member function"](https://github.com/llvm/llvm-project/pull/88264)
-- #88146 [[Clang][AST][NFC] Fix printing of dependent PackIndexTypes](https://github.com/llvm/llvm-project/pull/88146)
-- #88139 [[Clang][AST] Track whether template template parameters used the 'typename' keyword](https://github.com/llvm/llvm-project/pull/88139)
-- #88042 [[Clang][Sema] Implement approved resolution for CWG2858](https://github.com/llvm/llvm-project/pull/88042)
-- #87541 [[Clang][Sema] Fix crash when 'this' is used in a dependent class scope function template specialization that instantiates to a static member function](https://github.com/llvm/llvm-project/pull/87541)
-- #86817 [[Clang][Sema] Fix explicit specializations of member function templates with a deduced return type](https://github.com/llvm/llvm-project/pull/86817)
-- #86682 [[Clang][AST][NFC] Move template argument dependence computations for MemberExpr to computeDependence](https://github.com/llvm/llvm-project/pull/86682)
-- #86678 [[Clang][AST][NFC] MemberExpr stores NestedNameSpecifierLoc and DeclAccessPair separately](https://github.com/llvm/llvm-project/pull/86678)
-- #84457 [Revert "[Clang][Sema] Fix crash when using name of UnresolvedUsingValueDecl with template arguments (#83842)"](https://github.com/llvm/llvm-project/pull/84457)
-- #84050 [[Clang][Sema] Diagnose class member access expressions naming non-existent members of the current instantiation prior to instantiation in the absence of dependent base classes](https://github.com/llvm/llvm-project/pull/84050)
-- #83842 [[Clang][Sema] Fix crash when using name of UnresolvedUsingValueDecl with template arguments](https://github.com/llvm/llvm-project/pull/83842)
-- #83024 [[Clang][Sema] Fix crash when MS dependent base class lookup occurs in an incomplete context](https://github.com/llvm/llvm-project/pull/83024)
-- #82417 [[Clang][Sema] Defer instantiation of exception specification until after partial ordering when determining primary template](https://github.com/llvm/llvm-project/pull/82417)
-- #82277 [[Clang][Sema] Convert warning for extraneous template parameter lists to an extension warning](https://github.com/llvm/llvm-project/pull/82277)
-- #81642 [[Clang] Unify interface for accessing template arguments as written for class/variable template specializations](https://github.com/llvm/llvm-project/pull/81642)
-- #81171 [[clang-tidy] Fix failing test after #80864](https://github.com/llvm/llvm-project/pull/81171)
-- #80899 [[Clang][Sema] Implement proposed resolution for CWG2847](https://github.com/llvm/llvm-project/pull/80899)
-- #80864 [[Clang][Sema] Abbreviated function templates do not append invented parameters to empty template parameter lists](https://github.com/llvm/llvm-project/pull/80864)
-- #80842 [[Clang][Sema] Diagnose declarative nested-name-specifiers naming alias templates](https://github.com/llvm/llvm-project/pull/80842)
-- #80359 [[Clang][Sema] Correctly look up primary template for variable template specializations](https://github.com/llvm/llvm-project/pull/80359)
-- #80171 [[Clang][Sema] Diagnose friend declarations with enum elaborated-type-specifier in all language modes](https://github.com/llvm/llvm-project/pull/80171)
-- #79760 [[Clang][NFC] Remove TemplateArgumentList::OnStack](https://github.com/llvm/llvm-project/pull/79760)
-- #79683 [Reapply "[Clang][Sema] Diagnose function/variable templates that shadow their own template parameters (#78274)"](https://github.com/llvm/llvm-project/pull/79683)
-- #78720 [[Clang][Sema] Allow elaborated-type-specifiers that declare member class template explict specializations](https://github.com/llvm/llvm-project/pull/78720)
-- #78595 [[Clang][Sema] Diagnose use of template keyword after declarative nested-name-specifiers](https://github.com/llvm/llvm-project/pull/78595)
-- #78325 [[Clang][Sema][NFC] Remove unused Scope* parameter from Sema::GetTypeForDeclarator and Sema::ActOnTypeName](https://github.com/llvm/llvm-project/pull/78325)
-- #78274 [[Clang][Sema] Diagnose function/variable templates that shadow their own template parameters](https://github.com/llvm/llvm-project/pull/78274)
-- #78243 [[Clang][Parse] Diagnose member template declarations with multiple declarators](https://github.com/llvm/llvm-project/pull/78243)
diff --git a/_posts/2024-04-22-MattsQ1Update.md b/_posts/2024-04-22-MattsQ1Update.md
deleted file mode 100644
index 2f17524e5..000000000
--- a/_posts/2024-04-22-MattsQ1Update.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: matt
-title: Matt's Q1 2024 Update
-author-id: matt
-author-name: Matt Borland
----
-
-Over the past few months I have been working on libraries that are in various stages of the boost lifecycle:
-
-## Newly Accepted Libraries
-
-### Charconv
-
-Charconv (https://github.com/boostorg/charconv) had its review period from 15 - 25 Jan 2024.
-The review manager for this review was Chris Kormanyos, and he did an excellent job.
-I want to thank the boost community because I received a lot of good feedback during the review period, and a number of bugs were squashed.
-The first release of the library was Boost 1.85, and it seems like a number of boost libraries will use it internally in coming releases.
-Discussion of this library can be found on the Cpplang slack at `#boost-charconv`.
-
-## Libraries for Proposal
-
-### Decimal
-
-Decimal (https://github.com/cppalliance/decimal) is a ground-up implementation of the IEEE 754 Decimal Floating Point types in C++14, and is co-authored by Chris Kormanyos.
-The library continued to make progress this quarter with most of the heavy machinery of `` and `` being added.
-We have also started optimizing portions of the library such as completely replacing the basis for `decimal128` which increased performance by up to a factor of 100.
-While not totally feature complete Chris and I believe the library is in a good position for a beta, so look forward to that announcement early in the first-half of Q2.
-Discussion of this library can be found on the Cpplang slack at `#boost-decimal`.
-
-### Multi
-
-Multi (https://github.com/correaa/Boost-Multi) is a modern C++ library that provides access and manipulation of data in multidimensional arrays, for both CPU and GPU memory.
-This is a high-quality library developed by Alfredo Correa (https://github.com/correaa).
-I will be serving as the review manager, and helping Alfredo to "boostify" the library beforehand.
-Look for an announcement for the review of this exciting library during Q2 and don't hesitate to start investigating the library now.
-The author can be found on the Cpplang slack at `#boost`.
-
-## Existing Libraries
-
-### Math
-
-New in Boost 1.85 is a number of optimization algorithms (https://www.boost.org/doc/libs/1_85_0/libs/math/doc/html/optimization.html) developed by Nick Thompson.
-This sub-library contains both classical (random_search), and novel (jSO) algorithms.
-I was able to assist him in the development and debugging of multi-threaded code which is never an easy task.
diff --git a/_posts/2024-04-22-MohammadsQ1Update.md b/_posts/2024-04-22-MohammadsQ1Update.md
deleted file mode 100644
index 654423e8e..000000000
--- a/_posts/2024-04-22-MohammadsQ1Update.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: mohammad
-title: Mohammad's Q1 2024 Update
-author-id: mohammad
----
-
-The following is an overview of some projects I have been working on in the last few months:
-
-### Boost.Beast
-
-Aside from addressing user issues and typical bug fixes within the project, this quarter I primarily focused on:
-
-- Resolving platform and compiler specific warnings.
-- Rectifying SSL builds in CI.
-- Documenting certain pitfalls and subtleties in some operations.
-
-
-### Boost.Http.Proto and Boost.Buffers
-
-I've recently started contributing to the [Http.Proto](https://github.com/cppalliance/http_proto) project. My contributions focus on refining the serializer's handling of algorithms capable of generating body contents directly within the serializer's internal buffer in one or multiple steps, for example, for sending the contents of a file as an HTTP message without relying on an external buffer. Additionally, I've converted the documentation in Boost.Buffers to Asciidoc format and leveraged [MrDocs](https://github.com/cppalliance/mrdocs) for documentation generation instead of Doxygen.
-
-
-### Boost-Gecko
-
-I've made some changes to [Boost-Gecko](https://github.com/cppalliance/boost-gecko) so it can utilize all the cores on the machine that is running on, which made the crawling operation way faster. In the next step, I added a Github workflow for the indexing operation, so now it can automatically crawl the latest version of Boost libraries documentation and upload them to Algolia.
diff --git a/_posts/2024-04-28-FernandoQ1Update.md b/_posts/2024-04-28-FernandoQ1Update.md
deleted file mode 100644
index 03af2014e..000000000
--- a/_posts/2024-04-28-FernandoQ1Update.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: fernando
-title: Fernando's Q1 Update
-author-id: fernando
----
-
-The first quarter of 2024 has been a period of deepening engagement and significant contributions to two major areas: the development of MrDocs and supporting enhancements in Boost Unordered's concurrent map capabilities.
-
-### Further Enhancements to MrDocs
-
-#### Key Developments in MrDocs:
-- **Advancing Code Understanding**: My recent work has focused on enhancing MrDocs' ability to parse and analyze complex C++ code constructs. This includes the implementation of using directives, using declarations, and namespace aliases which significantly improve the tool's ability to handle advanced C++ features.
- - **PR #545**: Implements using directives, using declarations, and namespace aliases. [View PR](https://github.com/cppalliance/mrdocs/pull/545)
- - **PR #541**: Implementing "deducing this" enhances MrDocs’ capability to understand and document modern C++ idioms. [View PR](https://github.com/cppalliance/mrdocs/pull/541)
-
-- **Integration with CMake**: I've also worked on integrating CMake more deeply into the workflow of MrDocs to streamline the generation of necessary build and configuration files:
- - **PR #539**: Refactors to fallback to `cmake --system-information` when needed. [View PR](https://github.com/cppalliance/mrdocs/pull/539)
- - **PR #532**: Executes CMake to directly obtain system configuration data, simplifying setup for users. [View PR](https://github.com/cppalliance/mrdocs/pull/532)
-
-- **Enhancing Accessibility and Usability**: To further improve the user experience, I've focused on the automatic detection of compiler default include paths, which simplifies the configuration process and ensures that MrDocs works seamlessly across different environments.
- - **PR #515**: Uses compiler default include paths to enhance cross-platform compatibility. [View PR](https://github.com/cppalliance/mrdocs/pull/515)
-
-These enhancements are designed to make MrDocs not only more user-friendly but also more powerful in handling complex C++ projects, thereby supporting the C++ community in creating better-documented and more maintainable code bases.
-
-### Collaborative Work on Boost Unordered
-
-#### Supporting Advanced Concurrent Data Structures:
-
-- **Benchmarking and Testing**: I have been collaborating closely with Joaquín on advancing the performance of Boost Unordered's concurrent map to handle high concurrency levels effectively. This work involves rigorous testing and benchmarking to identify and resolve performance bottlenecks.
-- **Learning and Applying Advanced Techniques**: Inspired by the advanced concurrency techniques used in ParlayHash, our focus has been on exploring and implementing similar strategies within Boost. Although progress has been challenging due to the need for extensive testing on many-core machines, this work is crucial for enhancing the scalability of Boost libraries in parallel computing environments.
-
-### Reflections on Remote Collaboration
-
-Collaborating with the C++ Alliance has continued to be a rewarding experience. The flexibility to collaborate across different time zones and the ability to work asynchronously on open-source projects have significantly contributed to my personal growth and professional development. The support and dynamic collaboration within the community not only foster innovation but also enhance our collective capability to tackle complex technical challenges.
-
-### Looking Forward
-
-As we move into the next quarter, I am excited about the potential for further advancements in both MrDocs and Boost Unordered. The ongoing projects not only highlight our commitment to the Boost community but also demonstrate our leadership in driving C++ innovation forward. I look forward to continuing my contributions and sharing my experiences with the community.
diff --git a/_posts/2024-04-29-dmitrys-q1-update.md b/_posts/2024-04-29-dmitrys-q1-update.md
deleted file mode 100644
index d8e6b7042..000000000
--- a/_posts/2024-04-29-dmitrys-q1-update.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: dmitry
-title: Dmitry's Q1 2024 Update
-author-id: dmitry
-author-name: Dmitry Arkhipov
----
-
-In the first quarter of 2024 I mostly continued with work on direct
-serialization, cleanup work in Boost.JSON, and experiments with its JSON parser
-and serialiser. The experiments have resulted in discovery of a significant
-performance pessimization in the parser. The fix will soon be merged into the
-project.
-
-Another experiment that will eventually bring significant performance
-improvement is replacing JSON's number serialisation code with usage of
-the Charconv library, which was recently accepted into Boost. That library has
-functions for efficient parsing and serialisation of numbers, but Boost.JSON
-can't use the parsing code for its default number parsing mode due to several
-requirements, the main one being streaming parsing. The precise number parsing
-mode, on the other hand, is already using an earlier embedded version of
-Charconv. The addition of dependency on Charconv has been delayed, though, due
-to it not supporting a few old compilers which are supported by JSON. This
-summer a major user of those compilers (an old Red Hat version) goes EOL, and I
-decided to wait until that point to deprecate support for those compilers.
-
-One small but important change was replacing internal configuration for
-endianness and instead relying on Boost.Endian for that. The original reason
-Boost.JSON had such internal configuration to begin with was the standalone
-mode, which was removed a while ago. But other changes were of higher priority
-and thus, I only got around to this now.
-
-One interesting JSON issue fixed was related to conversion of
-`filesystem::path` objects (https://github.com/boostorg/json/issues/975). Due
-to unfortunate decisions made many years ago a `path` is a sequence of itself.
-Treating paths as sequences thus results in infinite recursion. The fix added
-dedicated handling for paths and just in case forbade recursive sequences
-altogether.
diff --git a/_posts/2024-05-07-AlanQ1Update.md b/_posts/2024-05-07-AlanQ1Update.md
deleted file mode 100644
index 09fe52654..000000000
--- a/_posts/2024-05-07-AlanQ1Update.md
+++ /dev/null
@@ -1,283 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: [ alan ]
-title: Alan's Q1 Update 2024
-author-id: alan
----
-
-# Summary
-
-- [MrDocs](#mrdocs)
-- [Boost Libraries](#boost-libraries)
-- [Boost Release Tools](#boost-release-tools)
-- [Boost Website](#boost-website)
-- [C++ Github Actions](#c-github-actions)
-
-## MrDocs
-
-Over the last quarter, we continued our work on [MrDocs](https://github.com/cppalliance/mrdocs), a
-documentation generator for C++ projects. I have been overseeing and reviewing all the work done by the other
-contributors
-to the project. Besides the work done by Krystian and Fernando, George H. (an author
-of https://github.com/hdoc/hdoc[HDoc])
-also joined the project as a contributor.
-
-We have been working on making the project at least as competitive as the [Doxygen](https://www.doxygen.nl/) with
-[Docca](https://github.com/boostorg/docca) we have been using with other libraries. The goal is to use it in a
-subset of Boost libraries that will also use [Antora](https://antora.org/) for their documentation.
-In this process, we have also been improving usability and documentation, considering the feedback we received.
-
-### Features
-
-In particular, work in Q1/2024 included:
-
-- Updating the LLVM version we have been using and adapting the project to the new version
-- Updating CI workflows to use official releases for all dependencies, which is an essential step in the direction of
- making the project more stable for user instructions, CI, and standalone binaries
-- Improve the documentation to provide more straightforward installation instructions and examples.
-- New Antora plugins were included to make the documentation more interactive and easier to navigate.
-- A new CI workflow that builds and caches LLVM from source, making it easier to update the LLVM versions and
- deprecate the old workflow that required manual intervention to update binaries on the MrDocs website
-- Custom scripts and presets for dependencies that do not provide them, such as Duktape and LLVM
-- Adjust CI to replicate the instructions in the documentation and vice-versa precisely.
-- CMake Presets updated to improve the build time and reflect the documentation instructions
-- Work on sanitizing references in Javadoc commands that need to know whether the first argument is an identifier.
- Clang often provides these references as simple text.
-- Support for multi-node HTML tags: Clang represents several HTML tags by multiple nodes. MrDocs simply ignored these
- tags, but now they are correctly represented in the DOM.
-- Support for several new Javadoc commands ("throws", "details", "see") and HTML tags ("br", "img", "input", "hr",
- "meta", "link", "base", "area", "col", "command", "embed", "keygen", "param", "source", "track", "wbr"). These tags
- were already present in the Boost.URL documentation, but MrDocs did not support them.
-- Simplify the ASTVisitor by generalizing the traverse function.
-- Provide demos with their mrdocs.yml files and use them instead of generating new ones. This change allows the
- projects to handle their custom parameters instead of relying on the global configuration.
-- Support input filters to include or exclude files according to their paths.
-- Create a bash argument parser so that mrdocs.yml parameters that represent CMake options can use the
- same syntax used to call CMake from the command line.
-- Support for automatic CMake execution by MrDocs to generate the `compile_commands.json` file. This feature was
- implemented over many commits.
-- Support for C source files.
-- Filter sanitizer flags in the command database.
-- A number of improvements to the [C++ Actions](#c-github-actions) project directed at benefiting MrDocs.
-- Dozens of minor bug fixes and improvements to the implementation
-
-### Binaries
-
-While working on these concrete issues, I have also explored strategies to generate stable standalone binaries
-for MrDocs. The motivation is that it is time-consuming to build MrDocs, so the way MrDocs is supposed to be most
-commonly used is with scripts that download portable executables and run the tool. These scripts will usually be run
-in CI on some Ubuntu-X container.
-
-The problem is we are using recent versions of Ubuntu (Ubuntu 23.04 now) to originally build MrDocs because of the C++
-features we need. However, users (including the [Boost Release Tools](https://github.com/boostorg/release-tools))
-usually use earlier versions of Ubuntu (usually from 18.04 to 22.04) for various reasons. Binaries built on
-Ubuntu X (say 23.04) does not work under Ubuntu > lookup_users(any_connection& conn, std::span ids)
-{
- // Compose the query
- mysql::format_context ctx(conn.format_opts().value());
- ctx.append_raw("SELECT * FROM user WHERE id IN (");
- bool is_first = true;
- for (auto id: ids)
- {
- // Comma separator
- if (!is_first) ctx.append_raw(", ");
- is_first = false;
-
- // Actual id
- mysql::format_sql_to(ctx, "{}", id);
- }
- ctx.append_raw(")");
- std::string query = std::move(ctx).get().value();
-
- // Run it
- mysql::static_results res;
- co_await conn.async_execute(query, res);
- co_return {res.rows().begin(), res.rows().end()};
-}
-```
-
-That's verbose and easy to get wrong. And the price of an error here can be a vulnerability.
-To solve this, we've added built-in support for ranges:
-
-```cpp
-asio::awaitable> lookup_users(any_connection& conn, std::span ids)
-{
- // Compose the query. May generate "SELECT * FROM user WHERE id IN (10, 21, 202)"
- auto query = mysql::format_sql(conn.format_opts().value(), "SELECT * FROM user WHERE id IN ({})", ids);
-
- // Run it
- mysql::static_results res;
- co_await conn.async_execute(query, res);
- co_return {res.rows().begin(), res.rows().end()};
-}
-```
-
-Much better, isn't it? And if you need additional functionality, `mysql::sequence` allows to pass custom glue strings and per-element formatting functions. Most cases, including batch inserts, can be expressed in terms of a single format string.
-
-Our next step here is implementing an easy-to-use execution request that coalesces composing the query and executing it in a single step. This came up during the review, and it's finally going to be a reality.
-
-## Pipeline mode
-
-The MySQL client/server protocol is strictly half duplex. The client sends a request, and the server responds. Our measurements show that some use cases involving lightweight requests are dominated by round-trip time. In these cases, coalescing individual requests into a single message helps performance.
-
-Use cases fitting this description include connection setup code and preparing/closing statements in batch.
-
-The `connection_pool` class has been using this feature internally for a release, and we now expose it for the general public:
-
-```cpp
-// Sets up a connection for re-use. connection_pool cleans up connections in a similar way
-asio::awaitable setup_connection(any_connection& conn)
-{
- // Build a pipeline describing what to do.
- mysql::pipeline_request req;
- req.add_reset_connection() // wipe session state
- .add_set_character_set(mysql::utf8mb4_charset) // SET NAMES utf8mb4
- .add_execute("SET time_zone = '+00:00'"); // Use UTC as time zone
- std::vector res;
-
- // Execute the pipeline
- co_await conn.async_run_pipeline(req, res, asio::deferred);
-}
-```
-
-We can write this because we fully control the library's serialization and networking, rather than wrapping other MySQL clients.
-
-## Other Boost.MySQL work
-
-I've also worked on lesser (but necessary) tasks on Boost.MySQL, including enabling buffer size limits for `any_connection`, adding support for C++20 time types to our `date` and `datetime` types, as well as some bug fixing and refactoring.
diff --git a/_posts/2024-07-10-ChristiansQ2Update.md b/_posts/2024-07-10-ChristiansQ2Update.md
deleted file mode 100644
index c631a5dd3..000000000
--- a/_posts/2024-07-10-ChristiansQ2Update.md
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: christian
-title: Christian's Q2 2024 Update
-author-id: christian
-author-name: Christian Mazakas
----
-
-# Boost.Http.Proto
-
-This last quarter I worked primarily in the http-proto library, this time extending
-serialization to include chunking and zlib compression routines: deflate and gzip.
-
-This is the first time I've ever used zlib so it was definitely a learning experience
-but it's given me valuable insights into how other libraries of this nature work.
-
-More importantly was reifying the application of such dynamic code with the existing
-code which supports many possible permutations.
-
-Serialization in http-proto enables users to consume output in myriad different ways
-and the output itself can be framed and transformed as well. This forms a product
-space, in all actuality, so it was quite a feat to unify the core logic. The secret
-was operating in terms of distinct input and output buffer spaces, which could
-sometimes alias.
-
-# Boost.Compat
-
-I also dedicated some time into working on Boost's Compat module, which is home
-to several different kinds of polyfills of C++ constructs introduced in later
-C++ standards.
-
-This time I worked on `function_ref` and as far as I'm aware, I'm the first
-implementor of such a facility. The class seems very simple on the surface. Just
-a simple non-owning type-erased view of a Callable object but the devil is always
-in the details.
-
-`const` and `noexcept` each change the actual type of the function signature used
-so again, I had to test another product space. The testing burden for many of these
-components is quite high and while there are some, it'll be interesting to see if
-anyone actually uses this facility and how well it fares.
-
-Interestingly, `function_ref` actually has a form of currying with regards to member
-access and objects. This doesn't seem well-known and actually came as a surprise to
-me.
-
----
-
-There's a lot more to look forward to later in the year and I'll be excited to
-write the next update.
-
-- Christian
diff --git a/_posts/2024-07-10-MohammadsQ2Update.md b/_posts/2024-07-10-MohammadsQ2Update.md
deleted file mode 100644
index 2d6e1dbab..000000000
--- a/_posts/2024-07-10-MohammadsQ2Update.md
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: mohammad
-title: Mohammad's Q2 2024 Update
-author-id: mohammad
----
-
-The following is an overview of some projects I have been working on in the last few months:
-
-
-### Boost.Beast
-
-Besides addressing user issues and typical bug fixes, here are a couple of contributions to the Boost.Beast project that I find interesting:
-
-##### Deprecating `beast::ssl_stream` and `beast::flat_stream`
-
-The `beast::flat_stream` was originally designed to consolidate buffer sequences into a single intermediate buffer, reducing the number of I/O operations required when writing these sequences to a stream that lacks scatter/gather support. However, after Asio improved the performance of write operations in [ssl::stream](https://www.boost.org/doc/libs/develop/doc/html/boost_asio/reference/ssl__stream.html) by [linearizing gather-write buffer sequences](https://github.com/chriskohlhoff/asio/commit/17637a48ccbfa2f63941d8393a7c8316a8df4a79), the necessity for `beast::ssl_stream` and `beast::flat_stream` has decreased. Their continued inclusion has caused confusion among new users and added unnecessary complexity to the documentation and code snippets. Consequently, these streams are now deprecated, and the use of `asio::ssl::stream` is recommended in the documentation, examples, and code snippets.
-
-**Note:** Existing code will remain functional, as `beast::ssl_stream` has been redefined as a type that publicly inherits from `asio::ssl::stream`.
-
-##### Simplifying C++20 Coroutine Examples
-
-The latest Asio release introduces several changes that significantly reduce code complexity and noise. Previously, we had to redefine IO object types with an executor that has a default completion token type, like this:
-
-```C++
-using tcp_socket = asio::as_tuple_t::as_default_on_t;
-```
-
-With the new Asio release, `asio::deferred` is now the default completion token, eliminating the need to redefine IO objects with custom executors. Now, these work out of the box:
-
-```C++
-auto n = co_await http::async_read(stream, buffer, res);
-// Or with a partial completion token:
-auto [ec, n] = co_await http::async_read(stream, buffer, res, asio::as_tuple);
-```
-
-Using these features and the new [asio::cancel_after](https://www.boost.org/doc/libs/develop/doc/html/boost_asio/reference/cancel_after.html) functionality, all the C++20 coroutine examples in Beast have been refactored to be more concise and clear. Additionally, a `task_group` has been added to the [advanced_server_flex_awaitable](https://github.com/boostorg/beast/blob/develop/example/advanced/server-flex-awaitable/advanced_server_flex_awaitable.cpp) example to demonstrate a graceful shutdown process by propagating a cancellation signal to all session subtasks.
-
-##### Adding New Fuzzing Targets
-
-In this release, we addressed two bug reports that were caught by fuzzing the code: [#2879](https://github.com/boostorg/beast/pull/2879) and [#2861](https://github.com/boostorg/beast/pull/2861). Thanks to [tyler92](https://github.com/tyler92), we have added [several fuzzing targets](https://github.com/boostorg/beast/tree/develop/test/fuzz) to the project. These targets now fuzz the code with each pull request and at scheduled times throughout the day.
-
-
-### Boost.Http.IO
-
-As the [Http.Proto](https://github.com/cppalliance/http_proto) project continues to mature, we have begun enhancing the [Http.Io](https://github.com/cppalliance/http_io) examples to ensure our sans-io API aligns closely with the evolving requirements of the I/O layer. To start, I have introduced a basic [C++20 coroutine client example](https://github.com/cppalliance/http_io/blob/develop/example/client/flex_await), which will later be expanded into a tool similar to curl.
-
-
-### Boost-Gecko
-
-[Boost-Gecko](https://github.com/cppalliance/boost-gecko) now features a new index for the [learn section](https://www.boost.io/docs/) of the updated Boost website and a new crawler for [website-v2-docs](https://github.com/boostorg/website-v2-docs). This update allows users to search within the documentation in the learn section using the website's search dialog. The crawling process and index record uploads are automated through a GitHub [workflow](https://github.com/cppalliance/boost-gecko/blob/develop/.github/workflows/index_on_algolia.yml), requiring minimal maintenance for each Boost release.
diff --git a/_posts/2024-07-10-SamsQ2Update.md b/_posts/2024-07-10-SamsQ2Update.md
deleted file mode 100644
index cb5751020..000000000
--- a/_posts/2024-07-10-SamsQ2Update.md
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: sam
-title: Sam's Q2 2024 Update
-author-id: sam
----
-
-Here's an overview of some projects I have been working on the last few months.
-
-### Boost Downloads
-
-Discuss the details of uploading windows builds to the new CDN with Tom Kent. When publishing the boost releases, they are first uploaded to an S3 bucket s3://boost-archives/. Then the CDN origin servers download the files locally.
-
-### Boost website boostorg/website-v2
-
-A Fastly CDN is configured in front of the boost.io website to increase performance and reduce download times. Fastly is a hosted version of Varnish software and uses the VCL programming language. I have been participating on their community forums to find solutions and techniques for adjusting headers, removing cookies, and modifying cache settings. The VCL framework allows manipulating the http traffic at various phases of the packet's journey during recv, pass, fetch, hit, miss. The language is distinct, but similar to javascript or python type languages. Multiple iterations of adjusting the VCL have been made. This is an ongoing project. Removed csrf tokens from certain website pages to facilitate caching.
-
-Configuring oauth social auth for the website domains, continued from previous months. Remove fallback default url from boost.io, to reduce traffic on S3, accelerate response times.
-
-Upload boost release notes to S3 so boost.io is able to parse antora markup files when switching from Quickbook to Antora.
-
-Launched a server regression.boost.io, an AWS EC2 instance to host uploaded boost regression test reports on the new website.
-
-Created a static html mirror of boost.org: boost.org.cpp.al.
-
-### boostorg/release-tools
-
-Developed an autocancel feature in boostorg/boost CircleCI to prevent multiple boost release builds from conflicting.
-
-Multiple updates to release-tools to support the new Fastly workflow.
-
-Switch all boost builds to use python virtual environments (python pip packages).
-
-### Mailman project
-
-Sent an email to the boost list, explaining the mailman3 project and requesting testers. Based on their feedback, researching and solving some issues in the mailman deployment at lists.preview.boost.org. Discovered an apparent bug in the mailman configuration where the home page was not immediately updating after imports or deletions. It turns out another component 'qcluster' must be running. Adjust installation scripts accordingly. Sent a (merged) pull request to upstream hyperkitty, fixing a formatting issue.
-
-### wowbagger
-
-A wowbagger disk failed on two different occasions, causing an outage of the boost.org website. Worked with David Sankel to restore the disk, recover from errors. Updated the legacy boost.org website to use Fastly archive downloads. This involved modifying numerous pages on the site and including new functions in boost-tasks.
-
-Migrated the boostorg/boost commitbot from wowbagger to Github Actions.
-
-### Jenkins
-
-New Antora doc previews for buffers, http-io, http-proto. An outstanding project with Revsys is to revamp the lcov/gcovr code coverage displays. With that in mind I have cleaned up and rewritten the gcovr scripts that Jenkins uses so those are streamlined and easier to work with in the future.
-
-### LLVM
-Meetings with Google about LLVM CI. The Alliance suggested funding an improvement in their CI infrastructure. This interaction encouraged Google to become more involved, as they had previously committed to support the clang llvm project financially. Either way, hopefully the end result will be faster LLVM CI builds.
-
-### GHA
-
-New 24.04 VM images.
-
-### cpp.al
-
-Enhanced CircleCI jobs to remove concurrency and force sequential processing/rendering of blog posts on the cppalliance.org website.
-
-### Drone
-
-Assist in debugging cppalliance/decimal, charconv, drone jobs. Publish new 24.04 drone containers. Upgrade auto-scaled VMs also. Configured swap space (disk-based memory) on auto-scaled drone agents. Refactor a few steps in the script that builds the drone server image.
diff --git a/_posts/2024-07-12-KennethQ2Update.md b/_posts/2024-07-12-KennethQ2Update.md
deleted file mode 100644
index 1a08bbbd3..000000000
--- a/_posts/2024-07-12-KennethQ2Update.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: kenneth
-title: Kenneth's Q2 2024 Update
-author-id: kenneth
-author-name: Kenneth Reitz
----
-
-## Project Overview: Boost.io Website Development
-
-During the second quarter of 2024, my primary focus has been on the development and enhancement of the new [boost.io](https://boost.io/) website. This project has been a significant undertaking, aimed at improving the online presence of the Boost C++ Libraries and providing a more user-friendly experience for our community.
-
-### Key Accomplishments:
-
-#### Onboarding to the C++ Alliance and Boost.io Repository
-
-Being a newer member of the team, I've been getting onboarded onto the [website-v2 repo](https://github.com/boostorg/website-v2) and helping other new members of the team get onboarded as well.
-
-Thankfully, the code quality in this repository is pretty high, so there's little concern in this department.
-
-#### Website Usability
-
-I spent a lot of time this quarter improving the usability of the website in subtle ways. Cleaning up the navigation, library titles, adding compatibility redirects to support URLs that were live on the [boost.org] website, etc.
-
-#### Developer Experience
-
-While there remains a lot of work in this area, I've done a few things like integrating [uv](https://astral.sh/blog/uv) into the build process to help speed up CI builds. In the future, I plan to use its `pip compile` functionality, too.
-
-#### Documentation Experience Improvements
-
-I spent a reasonable amount of time this quarter focusing on improving the navigation and usability of the [boost.io](https://boost.io/) website’s support of arbitrary documentation.
-
-Some clever improvements have been put in place to ensure that the user experience remains ideal as we continue to improve the solution, overall.
-
-Hosting documentation is not without its complexities.
-
-## Looking Forward:
-
-Working on this project has deepened my understanding of Django and web development best practices. The collaborative nature of the C++ Alliance, especially in its remote setting, has been instrumental in overcoming challenges and driving innovation in our approach to presenting Boost libraries to the community.
-
-I look forward to continuing to work on these projects and enjoy helping to make the world a better place!
diff --git a/_posts/2024-07-12-dmitrys-q2-update.md b/_posts/2024-07-12-dmitrys-q2-update.md
deleted file mode 100644
index 4681689c8..000000000
--- a/_posts/2024-07-12-dmitrys-q2-update.md
+++ /dev/null
@@ -1,122 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: dmitry
-title: Dmitry's Q2 2024 Update
-author-id: dmitry
-author-name: Dmitry Arkhipov
----
-
-In the second quarter of 2024 implementing direct parsing for Boost.JSON has
-finally been completed. Direct serialisation will support all the same types as
-direct parsing, including recently-added `std::filesystem::path`. After this addition,
-Boost.JSON's set of conversion features is almost full. The only missing part
-is the ability to use a sort of temporary proxy type for conversion. E.g.
-converting from user type to `std::string`, then converting the string to JSON
-string. This is not strictly needed for `value_to/from` set of functions, as
-you can always fully customise their behaviour by extending them. But it can be
-very useful for direct parsing, as it allows using an ancillary type that
-matches the structure of the JSON, and can convert to/from the intended user
-type.
-
-A little more than a year ago it was argued in a Boost.JSON issue that
-`object::if_contains` should return `optional` rather than `value*`.
-I have experimented with that, but also went on to research whether there are
-any projects on GitHub that would be broken due to such change. Turns out
-there's a lot of such projects. Which is why I decided against that change. But
-on the other hand, there are some benefits to having non-throwing accessors
-that communicate their inability to produce a value in their return value. And
-it's better to use a single kind of type for this. There is in fact an ideal
-candidate for such return type: `boost::system::result`. And in fact Boost.URL
-is using it in its return types extensively, eschewing the classic dual API
-approach (where one overload throws, and another overload uses an `error_code&`
-out parameter). If I could turn back time, I would have replaced the
-out-parameter overloads with ones returning `boost::system::result`. But as
-that would be way too significant an API change now, I instead added those
-accessors, giving them `try_` prefix (modelled after the already existing
-`try_value_to`). As a tangentially-related change, I added a defaulted
-`source_location` parameter to throwing accessor functions. This is a small
-quality of life improvement, that (if supported by the compiler) stores the
-location of the call site in the exception. The result is that the exception
-stores both the line where the user calls a Boost.JSON function, and (inside
-the `error_code`) the line where the error state was originally reached. This
-information should greatly simplify investigating issues that occur when using
-the library.
-
-A significant amount of time these past few months was occupied by a somewhat
-new project: Python-based reimplementation of
-[Docca](https://github.com/boostorg/docca). Docca is a set of XSLT stylesheets
-that converts [Doxygen](https://www.doxygen.nl) XML output into an API
-reference written in [Quickbook](https://github.com/boostorg/quickbook) markup
-language. Unfortunately, XSLT is both rather obscure, and rather cryptic, which
-results in difficulty fixing its bugs. As my frustrations piled up, I decided
-to write a new tool that would essentially do the same job, but in a more
-popular language, and designed for higher genericity. I chose Python for
-implementation largely due to its [Jinja](https://palletsprojects.com/p/jinja/)
-text template engine (another reason is Python's availability on virtually all
-platforms).
-
-The core of the design is operating in two steps: 1) collecting
-data from Doxygen XML output, organising it in a usable way, and sometimes even
-fixing Doxygen's warts and 2) generating output using Jinja. This kind of
-model/view separation isn't just philosophically more pleasant, but also makes
-the tool more generally useful. As previously mentioned Docca produces a
-specific style of API reference written in Quickbook. But this new
-implementation can use a different Jinja template to produce a different style
-of API reference: e.g. using another approach to sectioning, or having all
-overloads on one page a-la
-[Cppreference](https://en.cppreference.com/w/cpp/container/vector/vector). It
-can also produce output in an entirely different format, and I intend to take
-advantage of that in order to switch Boost.JSON's documentation to Asciidoc. I
-am also experimenting with running the tool an additional time to generate
-"macros" that expand into links to documented entities and could be used in the
-narrative documentation that precedes the reference.
-
-On the opposite side of this two phase arrangement is the ability to add
-alternative data collection algorithms. While currently Doxygen reigns supreme
-in the field of collecting documentation comments from C++ sources, there is a
-very ambitious project from the C++ Alliance called
-[Mr. Docs](https://github.com/cppalliance/mrdocs). As soon as it reaches enough
-maturity, a new data collector could be added to Docca.
-
-And finally, I wanted to tell about some changes I've contributed to Boost's
-build infrastructure, but I need to start from afar. The build system
-officially supported by Boost is called [b2](https://www.bfgroup.xyz/b2/)
-(formally known as Boost.Build). Unlike most modern build systems it doesn't
-use the 2 step configure-build approach championed by Autotools, and instead
-does the whole build by itself. As a result, it's a fully multi-config build
-system; that is, a single invocation of the build system can request targets
-built with different values for a particular property, resulting in the building of
-multiple variations of those targets. For example, you can request a target to
-be built by different compilers, targeting different OSes and architectures,
-using different C++ standards and dialects, and so on. b2 would calculate the
-Cartesian product of all requested options, and create the resulting binaries
-for each of the variations. For historical reasons, there was no good support
-for this multi-config paradigm with installation. `install` targets explicitly
-specified their intended installation location, which resulted in build errors
-if a multi-config installation was attempted. The issue wasn't fundamental to
-the build system, and Boost had employed a particular mitigation to that issue.
-But I wasn't satisfied with the status quo, so a few years ago I added explicit
-support for named installation directories to the `install` rule, with those
-directories' configuration being a part of the build variation. After the
-change a project could do something like
-
-```
-exe app : app.cpp ; # executable target app
-install install : app : (bindir) ; # install target install
-```
-
-And then with `b2 install-prefix=/usr/local` install `app` into
-`/usr/local/bin`, and with `b2 install-prefix=/opt/myapp` install `app` into
-`/opt/myapp/bin`. You could then go with a conditional property configuration,
-where e.g. target OS `a` implies location 1, and OS `b` requires location 2,
-and so on.
-
-The feature did require changing build scripts, and so at the time Boost
-remained with the setup it already had. But this spring, inspired by other
-needs I finally got around to changing Boost.Install (an ancillary project used
-by Boost's build scripts) to use this functionality. One thing led to another,
-and now not only can users configure per-config installation directories, but
-finally Boost has staging directory support via `staging-prefix`. E.g.
-`b2 --prefix=/opt/Boost/ staging-prefix=/var/tmp/1` installs into `/var/tmp/1`,
-but files are created with the intention to be later moved to `/opt/Boost/`.
diff --git a/_posts/2024-07-13-AlanQ2Update.md b/_posts/2024-07-13-AlanQ2Update.md
deleted file mode 100644
index e88f42c93..000000000
--- a/_posts/2024-07-13-AlanQ2Update.md
+++ /dev/null
@@ -1,240 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: [ alan ]
-title: Alan's Q2 Update 2024
-author-id: alan
----
-
-# Summary
-
-- [MrDocs](#mrdocs)
-- [Boost Libraries](#boost-libraries)
-- [Boost Release Tools](#boost-release-tools)
-- [Boost Website](#boost-website)
-- [C++ Github Actions](#c-github-actions)
-
-## MrDocs
-
-[MrDocs](https://github.com/cppalliance/mrdocs) is a tool for generating reference documentation from C++ code and
-javadoc comments. I have been overseeing and reviewing all contributions to the project.
-
-- Krystian has focused on metadata extraction and issues related to Clang.
-- Fernando has been tackling usability, critical bugs, and essential features.
-- I'm enhancing the CI setup and working on Antora extensions, which will be incorporated into website-v2-docs.
-
-Currently, metadata extraction primarily involves identifying SFINAE techniques in the code, enabling MrDocs to natively
-comprehend C++ constructs.
-
-In the last quarter, I authored several documents comparing MrDocs to its alternatives:
-
-- Gap Analysis: Compares MrDocs to alternatives, identifying and prioritizing missing features.
-- Minimum Viable Product (MVP): Outlines the features necessary for MrDocs to be competitive.
-- Product Pitch: Explains MrDocs' value proposition.
-
-The MVP aims to include features common to all alternative tools, preventing initial adoption blockers and ensuring a
-positive first impression. Additionally, the MVP includes unique features of MrDocs, such as native comprehension of C++
-constructs. We identified numerous selling points for MrDocs, including maintainability, multiple output formats, and
-its understanding of C++ constructs.
-
-We created [a GitHub project](https://github.com/orgs/cppalliance/projects/2/views/1) to track MrDocs' progress, with
-issues categorized by priority:
-
-- P0 - Boost Features: Critical bugs and usability issues, using Boost.URL as a proof of concept.
-- P1 - Main MVP Features: Essential MVP features requested by users and Boost projects.
-- P2 - Technical Debt: Technical debt from P0 and P1.
-- P3 - All MVP Features: All other MVP features.
-- P4 - Non-MVP Features: Features beyond the MVP outlined in the gap analysis.
-
-P0 encompasses all issues necessary for MrDocs to be utilized by an Antora extension in the Boost Release Tools. Using
-Boost.URL as a proof of concept, our goal in P0 is to replace the Doxygen + Docca workflow with MrDocs. We completed P0
-recently, with partial results available at https://843.urlantora.prtest2.cppalliance.org/site/url/index.html
-
-As we transition to P1, I'm adapting the Boost.URL project with Antora extensions to include reference documentation
-generated by MrDocs in the [website-v2-docs](https://github.com/boostorg/website-v2-docs) Antora project. P1 includes
-all features identified in our Gap Analysis for MrDocs to be competitive with alternatives. We are also improving
-usability and documentation based on feedback to ensure users can effectively utilize the tool. Completing P0 and P1
-will accumulate enough technical debt to start addressing P2.
-
-In Q2/2024, we implemented several features and fixes, including:
-
-- Detect SFINAE idiom
-- Extract using directives, declarations, namespace aliases
-- Extract pack expansions in base-specifiers
-- Extract variadic functions
-- Extract explicit specifiers
-- Detect implementation-defined and see-below namespaces
-- Support @pre, @post, and @n commands
-- Annotate templates with See Also sections, constructors, and destructors
-- Ensure HTML templates match AsciiDoc templates
-- Implement select handlebars helper
-- Link names in overload sets
-- Include full nested name specifiers in titles
-- Ensure safe names for section IDs
-- Provide warnings for duplicate commands
-- Propagate dependency modes
-- Extract dependencies for templates
-- Check for atomic shared pointer support
-- Update XML schema
-- Escape names of overload set members
-- Restore HTML demos
-- Normalize source-root
-- Ensure global namespace is always present
-- Remove symbol type from titles
-- Update LLVM version to 7a28a5b3 and adapt the project
-- Treat references to purposefully excluded symbols as non-errors
-- Emit error messages for invalid javadoc commands
-- Manage info and config tables as .inc files
-- Print diffs in golden tests
-- Create CI demo variants with command line variants
-- Build statically for MrDocs to be used in CI by other projects
-- Use default containers for each compiler
-- Update CI to use official releases for dependencies, enhancing stability
-- Compare generated documentation in PRs with a demo comparison artifact
-- Include ref_name in demo artifact names for proper PR comparison
-- Upload documentation to the website
-- Ensure template consistency with checks
-- Split test targets
-- Set default build type to RelWithDebInfo
-- Clean headers and footers in docs
-- Create a demo page in the documentation using a custom Antora plugin
-- Add an install page with direct links to GitHub releases
-- Provide more extensive usage examples
-- Complete the configuration file reference
-- Include design notes on rationale and philosophy
-- Create a Contributor Guide
-- Refer to official documentation in README.adoc
-- Add a new banner referring to Mr. Docs
-
-### Integrations
-
-As Boost.URL continues to integrate MrDocs, it has inspired the necessary features in MrDocs and is temporarily
-generating documentation with both Doxygen+Docca and Antora+MrDocs. The same workflow is implemented
-in [Boost.Buffers](https://github.com/cppalliance/buffers), inspiring new feature requests such as identifying Niebloids
-and SFINAE constraints in documentation.
-
-The features implemented in Boost.URL are described in the [Boost Libraries](#boost-libraries) section.
-
-### General work
-
-Overall, my responsibilities in MrDocs include:
-
-- Setting up and maintaining CI for the project;
-- MrDocs and LLVM release binaries;
-- Build scripts;
-- Setting up and integrating dependencies;
-- Setting up and deploying the Antora toolchains and documentation to the project website;
-- Working on supporting libraries;
-- Supervising and reviewing the work done by other contributors (Krystian and Fernando); and
-- Fixing bugs.
-
-## Boost Libraries
-
-The Boost library I've dedicated the most time to is Boost.URL. The library is in maintenance mode, but there is a
-constant demand for bug fixes and documentation improvements. Recent commits focus on the Antora+MrDocs workflow:
-
-- MrDocs collector plugin identifies versioned compiler executables on the host machine
-- Documentation includes a manual reference table
-- Refactored source code to distinguish details, implementation-defined, and see-below symbols
-- Created a new MrDocs target that includes all headers, reducing MrDocs run time from ~8 minutes to ~3 seconds and
- ensuring all headers are included in the documentation
-
-Other significant updates in CI and documentation include:
-
-- CI: Added support for Node.js 20 actions due to GitHub updating the base Node.js version to 20. The lack of static
- linking in Node.js caused errors related to glibc versions.
-- CI: Updated the Antora workflow to use Clang 18, addressing issues with standard library synchrony between the host
- machine and LLVM/Clang versions.
-- Docs: string_token exposition
-- CI improvements were coordinated with the C++ Github Actions project, which implemented new features for these
- scenarios.
-
-## Boost Release Tools
-
-I have integrated the toolchains I developed into the Boost Release Tools, adding support for features desired for the
-new Boost website, including the Antora+MrDocs workflow.
-
-Since the last report, we adapted the documentation workflow to relocate the Antora documentation in the release.
-Additionally, Boost.URL documentation in the Antora format will be included in the release once we enable it in
-website-v2-docs. We are delaying this to avoid disruptions during the current release cycle. Meanwhile, we continue
-using the Doxygen+Docca workflow and are developing the Antora UI for the next release.
-
-## Boost Website
-
-Among support projects for the new Boost website, I have been particularly involved
-with [`website-v2-docs`](https://github.com/boostorg/website-v2-docs), which includes the Boost website documentation as
-an Antora project. Its components cover the "User Guide," "Contributor Guide," and "Formal Review" sections.
-
-Since the project's inception, I have been overseeing and reviewing contributions from other team members.
-
-Overall, my responsibilities include:
-
-- Reviewing and merging pull requests
-- Setting up and maintaining CI
-- Coordinating with the website project on content uploaded to AWS buckets
-- Developing reusable build scripts for release tools and previews
-- Writing technical sections of the documentation
-- Developing custom Boost/Antora extensions, such as the Boost Macro extension
-- Maintaining the Antora toolchain and templates
-- Adjusting Boost libraries to match the expected formats
-
-For the next quarter, we plan to update the Antora UI bundle to resemble the current Boost website style.
-
-## C++ Github Actions
-
-[C++ Github Actions](https://github.com/alandefreitas/cpp-actions) is a project I have maintained since 2023. It is a
-collection of composable, independent, and reusable GitHub Actions for any C++ project needing testing on various
-compilers and environments.
-
-MrDocs, Boost.URL, Boost.HTTP, and Boost.Buffers currently use these actions in their CI. These projects provide
-valuable feedback to improve the actions.
-
-The project includes actions to:
-
-- Generate a Github Actions Matrix for C++ projects;
-- Setup C++ compilers;
-- Install and setup packages;
-- Clone Boost modules;
-- Run complete CMake and `b2` workflows;
-- Generate changelogs from conventional commits;
-- Generate summaries; and
-- Generate time-trace reports and flame graphs
-
-The actions are designed to be modular and interoperable. Each action has a specific role, such as configuring an
-environment or building a C++ project. They can be composed to create customized CI/CD workflows.
-
-One notable problem with GitHub actions in Q1/2024 is that GitHub decided to update the base version of Node.js to 20.
-Because Node.js is not statically linked, this change [broke](https://github.com/actions/setup-node/issues/922) many
-actions that depended on it. All projects that depended on older containers were getting an error related to the
-appropriate `glibc` version not being found. Not updating to Node.js 20 would also give users deprecation warnings.
-
-We ended up implementing a solution where older containers suggested by the `cpp-matrix` action
-have a `volumes` key to create a directory where node can be installed in such a way that GitHub actions will
-use this node executable. If using an old container, the user is responsible for installing Node.js in that
-directory. Examples are provided in the repository.
-
-Thus, a number of new features and fixes were added to the C++ Actions project in Q2/2024.
-
-- Feature: support clang >=18 (which is also critical for MrDocs workflows)
-- Feature: cpp-matrix force factor flags
-- Feature: cpp-matrix `is-container` auxiliary key
-- Feature: cpp-matrix older containers suggest volumes for node 20
-- Fix: cpp-matrix windows runner has MSVC 14.40.33810
-- Fix: cpp-matrix default macOS is macos-14
-- Fix: setup-gcc GCC 14 comes from Ubuntu 24.04
-- Fix: setup-gcc ensure software-properties-common
-- Fix: setup-clang verify LLVM repo release files to filter repositories that do not exist
-- Refactor: package-install set DEBIAN_FRONTEND to noninteractive
-- Refactor: cpp-matrix extract support sanitizer as boolean
-- Refactor: all actions use node20
-- Refactor: cpp-matrix pkg-config included in container default installs
-- CI: only create releases for tags
-- CI: external actions updated
-- CI: matrix.cxx not required
-- CI: b2-workflow variant2 does not test on windows
-- CI: older containers patch node
-- CI: update actions/checkout to v4
-- Test: cpp-matrix custom suggestions
-- Docs: cpp-matrix open ranges
-
-Most work has been done to fix issues revealed by testing the actions in new environments.
diff --git a/_posts/2024-07-14-BradenQ2Update.md b/_posts/2024-07-14-BradenQ2Update.md
deleted file mode 100644
index be9ad8375..000000000
--- a/_posts/2024-07-14-BradenQ2Update.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: braden
-title: Braden's Q2 2024 Update
-author-id: braden
-author-name: Braden Ganetsky
----
-
-## Speaking at C++Now
-
-At C++Now 2024, I gave my first full-length conference talk, titled "Unit testing an expression template library in C++20". The slides are up at [this link](https://ganets.ky/slides/2024-cppnow/). As of the time of writing, the talk has not yet been uploaded to YouTube.
-
-In this talk, I explored the current state of compile-time unit testing in C++ in the well-known unit testing libraries. I also discussed my own methods for unit testing at compile-time. This talk especially focuses on giving helpful diagnostics when encountering an error at compile-time. I used my [tok3n](https://github.com/k3DW/tok3n) library (a personal project) as a backdrop for the testing, which has been the driving force behind exploring this area in the first place.
-
-I had an enjoyable time preparing and giving this talk, and I certainly learned a lot. I appreciate the support from the others at the C++ Alliance, for helping me to grow as a speaker and to have this amazing opportunity to speak at C++Now 2024.
-
-## Boost.Unordered Visual Studio Natvis support
-
-In Q2 2024, I created the "boost_unordered.natvis" file, to give user-friendly visualizations for all Boost.Unordered containers in the Visual Studio debugger. In doing this, I learned quite a lot about Natvis files, which I wrote about in [this article](https://blog.ganets.ky/NatvisForUnordered/), titled "Natvis for boost::unordered_map, and how to use elements".
-
-Unfortunately, I wasn't initially able to support *all* Boost.Unordered containers. I thought I had to exclude the containers with allocators that use fancy pointers (like `boost::interprocess::offset_ptr`), but I consulted with Joaquín to eventually find a solution. It didn't technically make it for Q2, but it's close enough. I wrote [a 2nd article](https://blog.ganets.ky/NatvisForUnordered2/) explaining this whole process.
-
-I am excited to continue improving the user experience for Boost.Unordered. Next up, I'll be tackling GDB pretty-printers for the containers.
diff --git a/_posts/2024-07-15-KrystiansQ2Update.md b/_posts/2024-07-15-KrystiansQ2Update.md
deleted file mode 100644
index 32098255e..000000000
--- a/_posts/2024-07-15-KrystiansQ2Update.md
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: krystian
-title: Krystian's Q2 2024 Update
-author-id: krystian
----
-
-# MrDocs
-
-This quarter, my primary focus was on MrDocs. To that end, I'm happy to say that the P0 milestone for MrDocs has been reached! Although the vast majority of the work I did was bug hunting/fixing, I also implemented a novel feature which detects and simplifies types that use the SFINAE idiom.
-
-Prior to C++20, the primary mechanism for constraining templates was through use of the SFINAE idiom, e.g.:
-
-```cpp
-template <typename T>
-std::enable_if_t<std::is_pointer_v<T>, T> f(T); // only viable when T is a pointer type
-```
-
-These kinds of constrained declarations can be rather "noisy" due to the constraints being written into the type of the declaration. One solution is to use macros to show the simplified type when generating documentation:
-
-```cpp
-template <typename T>
-#ifndef DOCS
-std::enable_if_t<std::is_pointer_v<T>, T>
-#else
-T
-#endif
-f(T);
-```
-
-but this requires manual intervention on the part of the library author. Since MrDocs generates documentation using the clang AST, we can detect whether a template implements the SFINAE idiom and show the "replacement type" instead:
-
-```cpp
-template <typename T>
-std::enable_if_t<std::is_pointer_v<T>, T> // rendered as 'T' in the documentation since std::enable_if_t is a SFINAE template
-f(T);
-```
-
-In general, MrDocs determines whether a type naming a member `M` of a class template `C` uses the SFINAE idiom by identifying every declaration of `M` in the primary template and every partial/explicit specialization of `C`. If `M` is not found in at least one of the class definitions, and if every declaration of `M` declares it as an alias for the same type as every other declaration, then `C` is a template implementing the SFINAE idiom.
-
-# Clang
-
-In addition to working on MrDocs, I merged a number of clang patches this quarter:
-
-- #98567 [[Clang][AST] Move NamespaceDecl bits to DeclContext](https://api.github.com/repos/llvm/llvm-project/issues/98567)
-- #98563 [[Clang][AST] Don't use canonical type when checking dependence in Type::isOverloadable](https://api.github.com/repos/llvm/llvm-project/issues/98563)
-- #98547 [Reapply "[Clang] Implement resolution for CWG1835 (#92957)"](https://api.github.com/repos/llvm/llvm-project/issues/98547)
-- #98167 [[Clang][Sema] Handle class member access expressions with valid nested-name-specifiers that become invalid after lookup](https://api.github.com/repos/llvm/llvm-project/issues/98167)
-- #98027 [[Clang][Index] Add support for dependent class scope explicit specializations of function templates to USRGenerator](https://api.github.com/repos/llvm/llvm-project/issues/98027)
-- #97596 [[Clang][Sema] Correctly transform dependent operands of overloaded binary operator&](https://api.github.com/repos/llvm/llvm-project/issues/97596)
-- #97455 [[Clang][Sema] Fix crash when rebuilding MemberExprs with invalid object expressions](https://api.github.com/repos/llvm/llvm-project/issues/97455)
-- #97425 [[Clang][Sema] Treat explicit specializations of static data member templates declared without 'static' as static data members when diagnosing uses of 'auto'](https://api.github.com/repos/llvm/llvm-project/issues/97425)
-- #96364 [[Clang][Parse] Fix ambiguity with nested-name-specifiers that may declarative](https://api.github.com/repos/llvm/llvm-project/issues/96364)
-- #93873 [[Clang][Sema] Diagnose variable template explicit specializations with storage-class-specifiers](https://api.github.com/repos/llvm/llvm-project/issues/93873)
-- #92957 [[Clang] Implement resolution for CWG1835](https://api.github.com/repos/llvm/llvm-project/issues/92957)
-- #92597 [[Clang][Sema] Diagnose current instantiation used as an incomplete base class](https://api.github.com/repos/llvm/llvm-project/issues/92597)
-- #92452 [[Clang][Sema] Fix crash when diagnosing near-match for 'constexpr' redeclaration in C++11](https://api.github.com/repos/llvm/llvm-project/issues/92452)
-- #92449 [[Clang][Sema] Do not add implicit 'const' when matching constexpr function template explicit specializations after C++14](https://api.github.com/repos/llvm/llvm-project/issues/92449)
-- #92425 [[Clang][Sema] ASTContext::getUnconstrainedType propagates dependence](https://api.github.com/repos/llvm/llvm-project/issues/92425)
-- #92318 [[Clang][Sema] Don't build CXXDependentScopeMemberExprs for potentially implicit class member access expressions](https://api.github.com/repos/llvm/llvm-project/issues/92318)
-- #92283 [Reapply "[Clang][Sema] Earlier type checking for builtin unary operators (#90500)"](https://api.github.com/repos/llvm/llvm-project/issues/92283)
-- #92149 [Revert "[Clang][Sema] Earlier type checking for builtin unary operators (#90500)"](https://api.github.com/repos/llvm/llvm-project/issues/92149)
-- #91972 [[Clang][Sema] Fix bug where operator-> typo corrects in the current instantiation](https://api.github.com/repos/llvm/llvm-project/issues/91972)
-- #91620 [[Clang][Sema] Revert changes to operator= lookup in templated classes from #91498, #90999, and #90152](https://api.github.com/repos/llvm/llvm-project/issues/91620)
-- #91534 [[Clang][Sema] Do not mark template parameters in the exception specification as used during partial ordering](https://api.github.com/repos/llvm/llvm-project/issues/91534)
-- #91498 [[Clang][Sema] Fix lookup of dependent operator= outside of complete-class contexts](https://api.github.com/repos/llvm/llvm-project/issues/91498)
-- #91393 [Reapply "[Clang] Unify interface for accessing template arguments as written for class/variable template specializations (#81642)"](https://api.github.com/repos/llvm/llvm-project/issues/91393)
-- #91339 [[Clang][Sema] Don't set instantiated from function when rewriting operator<=>](https://api.github.com/repos/llvm/llvm-project/issues/91339)
-- #90999 [[Clang][Sema] Fix template name lookup for operator=](https://api.github.com/repos/llvm/llvm-project/issues/90999)
-- #90760 [[Clang][Sema] Explicit template arguments are not substituted into the exception specification of a function](https://api.github.com/repos/llvm/llvm-project/issues/90760)
-- #90517 [[Clang][Sema][Parse] Delay parsing of noexcept-specifiers in friend function declarations](https://api.github.com/repos/llvm/llvm-project/issues/90517)
-- #90500 [[Clang][Sema] Earlier type checking for builtin unary operators](https://api.github.com/repos/llvm/llvm-project/issues/90500)
-- #90478 [[Clang] Propagate 'SystemDrive' environment variable for unit tests](https://api.github.com/repos/llvm/llvm-project/issues/90478)
-- #90152 [Reapply "[Clang][Sema] Diagnose class member access expressions naming non-existent members of the current instantiation prior to instantiation in the absence of dependent base classes (#84050)"](https://api.github.com/repos/llvm/llvm-project/issues/90152)
-- #90104 [[Clang][Sema] Fix warnings after #84050](https://api.github.com/repos/llvm/llvm-project/issues/90104)
\ No newline at end of file
diff --git a/_posts/2024-09-12-Safe-Cpp-Partnership.md b/_posts/2024-09-12-Safe-Cpp-Partnership.md
deleted file mode 100644
index f327bae5c..000000000
--- a/_posts/2024-09-12-Safe-Cpp-Partnership.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: vinnie
-title: Safe C++ Partnership
-author-id: vinnie
----
-
-# Safe C++ Partnership
-
-I am happy to announce that the C++ Alliance has formed a partnership with
-Sean Baxter, a renowned engineer, to develop the
-[Safe C++ Extensions proposal](https://safecpp.org/draft.html).
-
-This is a revolutionary proposal that adds memory safety features to the C++
-programming language.
-
-This collaboration marks a significant milestone in the C++ ecosystem, as the
-need for safe code has never been more pressing. With the increasing importance
-of software security and reliability, developers are facing mounting pressure to
-adopt safer coding practices. The Safe C++ Extensions aim to address this
-critical need by introducing novel features that prevent common memory-related
-errors.
-
-We are thrilled to be working with Sean Baxter on this crucial initiative.
-The Safe C++ Extensions represent a major step forward in making C++ more secure
-and efficient, while preserving the language's performance and flexibility.
-
-The Safe Standard Library is a key component of the Safe C++ Extensions proposal.
-This extensive library addition will provide developers with robust, memory-safe
-implementations of essential data structures and algorithms. By integrating
-these components into the C++ Standard Library, we can ensure that new code is
-written with safety in mind from the outset.
-
-The C++ Alliance and Sean Baxter are seeking feedback from developers, researchers,
-and other stakeholders on the Safe C++ Extensions proposal. This collaborative
-process will help refine the project's scope and ensure that it addresses the most
-pressing needs of the C++ ecosystem.
-
-To view the latest draft of the Safe C++ Extensions proposal, learn more about
-Safe C++, or participate in discussions, please visit the official website at
-[https://safecpp.org](https://safecpp.org)
-or join us in the Official C++ Language Slack Workspace
-by signing up at
-[https://cpp.al/slack](https://cpp.al/slack)
-and visiting the #safe-cpp channel.
diff --git a/_posts/2024-10-11-Joaquins2024Q3Update.md b/_posts/2024-10-11-Joaquins2024Q3Update.md
deleted file mode 100644
index 74e994116..000000000
--- a/_posts/2024-10-11-Joaquins2024Q3Update.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: joaquin
-title: Joaquín's Q3 2024 Update
-author-id: joaquin
-author-name: Joaquín M López Muñoz
----
-
-During Q3 2024, I've been working in the following areas:
-
-### Boost.Unordered
-
-* Made visitation exclusive-locked within certain
-`boost::concurrent_flat_set` operations to allow for safe mutable modification of elements
-([PR#265](https://github.com/boostorg/unordered/pull/265), to be released in Boost 1.87.0).
-This is relevant for the work on Boost.Flyweight discussed below.
-* Added new concurrent, node-based containers `boost::concurrent_node_map` and
-`boost::concurrent_node_set` ([PR#271](https://github.com/boostorg/unordered/pull/271),
-to be released in Boost 1.87.0). These containers are, expectedly, slower than their flat
-counterparts, but provide pointer stability, which can be critical in some scenarios.
-* Fixed `std::initializer_list` assignment issues for open-addressing containers
-([PR#277](https://github.com/boostorg/unordered/pull/277), to be released in Boost 1.87.0).
-* Added `insert_and_visit` and related operations to concurrent containers
-([PR#283](https://github.com/boostorg/unordered/pull/283), to be released in Boost 1.87.0).
-`insert_or_visit(x, f)` invokes the visitation function `f` only if the element is _not_
-inserted (that is, it already existed). By contrast, `insert_and_visit(x, f1, f2)` invokes
-`f1` when the element is newly inserted, or `f2` otherwise. This operation can't be
-easily (or at all) emulated by user code, so it made sense that it be provided natively.
-* Reviewed Braden's work on [PR#269](https://github.com/boostorg/unordered/pull/269)
-and [PR#274](https://github.com/boostorg/unordered/pull/274)
-(to be released in Boost 1.87.0).
-
-### Boost.Flyweight
-
-* Marked the interface of `boost::flyweight` as `noexcept` where appropriate
-([PR#16](https://github.com/boostorg/flyweight/pull/16), to be released in Boost 1.87.0).
-* In response to a request from user Romain on Slack, added `concurrent_factory`
-([PR#17](https://github.com/boostorg/flyweight/pull/17), to be released in Boost 1.87.0).
-This factory, which is built on top of `boost::concurrent_node_set`,
-provides [excellent performance](https://www.boost.org/doc/libs/master/libs/flyweight/doc/examples.html#example9)
-in multithreaded population scenarios as it does not require any external locking policy.
-
-### Boost.MultiIndex
-
-* Updated CI support for this library ([PR#75](https://github.com/boostorg/multi_index/pull/75)).
-Although nominally C++03 compliant, Boost.MultiIndex has been brought to require
-C++11 by way of its internal dependencies; this opens the possibility of eventually
-modernizing the code base, and in particular getting rid of its usage of Boost.MPL
-in favor of Boost.Mp11. Stay tuned.
-
-### Boost.Bimap
-
-* Reviewed and merged [PR#45](https://github.com/boostorg/bimap/pull/45)
-(to be released in Boost 1.87.0).
-
-### Boost promotion and new website
-
-* Authored the Boost 1.86 announcement [tweet](https://x.com/BoostLibraries/status/1823783597792485433).
-* I've served as the PM for the new Boost website project (preview at [boost.io](https://boost.io)).
-On September 24 I transferred PM responsibilities to Rob Beeston, who's been doing
-an awesome job at it since. I'll be keeping an eye to this project, though, and will
-help any way I can.
-
-### Support to the community
-
-* I've proofread the excellent articles by Braden Ganetsky on
-[Natvis](https://blog.ganets.ky/NatvisForUnordered2/) and
-[GDB](https://blog.ganets.ky/PrettyPrinter/) debugging support for Boost.Unordered.
-* Reviewed and provided feedback for an early reference section of
-Alfredo Correa's [Boost.Multi](https://gitlab.com/correaa/boost-multi) upcoming proposal.
-* The Boost Asset Stewardship Review that took place on September [determined](https://lists.boost.org/boost-announce/2024/09/0630.php)
-that a Fiscal Sponsorship Committee (FSC) be created in charge of representing the
-Boost community in its relationship with the C++ Alliance as the newly elected
-fiscal sponsor for the project. I was appointed as a member of the FSC alongside
-Ion Gaztañaga and René Rivera, and will do my best to serve the community
-from that position.
diff --git a/_posts/2024-10-20-Christian2024Q3Update.md b/_posts/2024-10-20-Christian2024Q3Update.md
deleted file mode 100644
index 44d164f3e..000000000
--- a/_posts/2024-10-20-Christian2024Q3Update.md
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: christian
-title: "The Safe C++ Saga"
-author-id: christian
-author-name: Christian
----
-
-## The Safe C++ Saga
-
-I've spent the last quarter developing a successor to the C++ stdlib in Safe C++, the
-proposal from Sean Baxter to add borrow-checking semantics to C++.
-
-I've been an avid Rust fan since I started messing around with it in my spare time a few
-years ago so hopping on this project was an incredibly exciting opportunity. Getting to
-evolve a standard library in tandem with a language that attempts to do the nigh-on-impossible
-is a once-in-a-lifetime opportunity, really.
-
-Public reception, however, hasn't always been great. Most people viewed a new stdlib as a huge
-mark _against_ Safe C++.
-
-Many C++ developers aren't aware of the shift in object model that borrow-checking brings. It
-brings the long-fabled destructive move to C++ and its objects. Initialization analysis and the
-other components of borrow-checking enable the compiler to permit true relocation. Objects can be
-freely memcpy'd back and forth and this is well-defined because the language guarantees that objects
-are not accessed post-destruction and that their destructors are _not_ run. Borrow checking also
-protects against relocating self-referential structs, because a move cannot be done through a borrow.
-
-Out of this naturally falls alternative ways of creating library components. The current stdlib components
-are coded against the original object model of C++: that there's no such thing as relocation, there's only
-move and copy constructors. Object lifetime can begin using a view of an existing object. There's no notion
-of an object being destroyed after move or copy and if there were, the semantics for how this would work in C++
-is unclear because non-trivial destructors would still need to get run and what's more, there's no language
-mechanism to prevent accesses to the moved-from or relocated-from object.
-
-Borrow checking is such a fundamental shift to systems languages that a new standard library is a natural consequence.
-It's a change so fundamental that many want to outright reject it, as it would appear to steer the language's direction
-too radically. I can empathize with this and I agree with it on some level. But by the same token, I view it as
-a dramatic simplification and reification of everything we've worked towards and built since C++'s inception.
-
-One thing that always got lost in translation was that Rust isn't really that original. Most of its ideas have already
-been discussed and thought about for a while. It's just the only systems language we have that applied these ideas and
-it's shown that it's a successful endeavor. Safe C++ is then an experiment to do the same for C++ and it's working equally
-well. Safe C++ has proven that you can use exclusive mutability and borrow checking in C++ and it works.
-
-I love systems programming and I love C++ and I love Rust and I think the world is a better place when we do steal ideas
-from each other. In college, I took some creative writing/poetry courses and my professor mentioned an old adage that
-all the best writers steal and that's only proven itself true as time goes on for me. Shamelessly stealing the good ideas
-from other sources is where true innovation comes in because it creates novel ideas made from currently-working ones.
-
-I'm optimistic about the eventual future of C++ because we've proven that a truly safe C++ is possible. The only thing
-standing in the way is ourselves but that is a much more difficult problem to solve.
-
-- Christian
diff --git a/_posts/2024-10-20-Ruben2024Q3Update.md b/_posts/2024-10-20-Ruben2024Q3Update.md
deleted file mode 100644
index 6ec81f5aa..000000000
--- a/_posts/2024-10-20-Ruben2024Q3Update.md
+++ /dev/null
@@ -1,177 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: ruben
-title: "Boost.MySQL 1.87 features: with_params and with_diagnostics"
-author-id: ruben
-author-name: Rubén Pérez Hidalgo
----
-
-## Easy client-side SQL using with_params
-
-In previous posts, we had seen how Boost.MySQL client-side SQL formatting
-can be used to implement a wide variety of use cases. It was originally
-created for complex cases, like batch inserts, but it can also be applied for
-simple ones. Up to Boost 1.86, to retrieve a user by ID with client-side SQL,
-you could write this:
-
-```cpp
-void lookup(mysql::any_connection& conn, int id)
-{
- // Compose the query. May generate "SELECT * FROM user WHERE id = 10"
- auto query = mysql::format_sql(conn.format_opts().value(), "SELECT * FROM user WHERE id = {}", id);
-
- // Run it
- mysql::static_results res;
- conn.execute(query, res);
-
- // Do something with the results
-}
-```
-
-This is still verbose, and has 3 points where an error can happen:
-in `format_opts`, in `format_sql` and in `execute`.
-Handling these 3 without exceptions is much more involved.
-
-Boost 1.87 adds a new execution request, `with_params`, that can be used to simplify
-most queries:
-
-```cpp
-void lookup(mysql::any_connection& conn, int id)
-{
- // Compose the query in the client and run the request
- mysql::static_results res;
- conn.execute(mysql::with_params("SELECT * FROM user WHERE id = {}", id), res);
-
- // Do something with the results
-}
-```
-
-When executing `with_params`, the given query string is formatted
-(as per `format_sql`) and then sent to the server for execution.
-
-This is much less verbose, and gathers all possible errors under the `execute` function,
-making using exception-less code much easier. It's also more efficient, as
-the query is serialized directly to the network buffer.
-All constructs available in `format_sql` are available when using `with_params`.
-
-## Exceptions with diagnostics in async functions
-
-When using throwing sync functions, like `conn.execute("SELECT 1", res)`,
-an exception of type `mysql::error_with_diagnostics` is thrown on error.
-This is a `system::system_error`, with enhanced error information
-provided by the server. Implementing this is easy because the
-exception is thrown directly by Boost.MySQL.
-
-The async case is not that straightforward. Boost.Asio throws the exceptions
-for us, and has no way to access our diagnostics. Until Boost 1.86,
-we recommended the following:
-
-```cpp
-asio::awaitable<void> handle_request(mysql::connection& conn)
-{
- mysql::results r;
- mysql::diagnostics diag;
- auto [ec] = co_await conn.async_execute("SELECT 1", r, diag, asio::as_tuple(asio::deferred));
- mysql::throw_on_error(ec, diag);
-}
-```
-
-This is error-prone. The new `with_diagnostics` completion token
-can be used with async throwing schemes to simplify this task:
-
-```cpp
-asio::awaitable<void> handle_request(mysql::connection& conn)
-{
- mysql::results r;
- co_await conn.async_execute("SELECT 1", r, mysql::with_diagnostics(asio::deferred));
-}
-```
-
-Since this is a very common case, this is actually the default completion
-token for `any_connection`, and the above can be written as:
-
-```cpp
-asio::awaitable<void> handle_request(mysql::connection& conn)
-{
- mysql::results r;
- co_await conn.async_execute("SELECT 1", r);
-}
-```
-
-Which makes coroutines in throwing schemes actually useful.
-
-I've also developed another custom completion token to aid with tests.
-I've learnt a lot and I can tell you: developing a completion token
-is not for the faint-hearted, and requires _a lot_ of testing.
-
-## Timeouts: support for asio::cancel_after, asio::cancel_at
-
-These tokens require special support from libraries to work.
-Starting with Boost 1.87, Boost.MySQL supports these tokens.
-From now on, setting a timeout to a query is as simple as:
-
-```cpp
-asio::awaitable<void> handle_request(mysql::connection& conn)
-{
- mysql::results r;
- co_await conn.async_execute("SELECT 1", r, asio::cancel_after(std::chrono::seconds(5)));
-}
-```
-
-## Thread-safety in connection_pool
-
-`connection_pool` tried to provide easy thread-safety by using special executor
-semantics. Bug reports indicated that this is a feature that people use, but
-that the design wasn't entirely correct. Now that I've learnt more about
-Asio, executors and cancellation, I've re-written thread-safety support
-for `connection_pool`.
-
-The interface is slightly different, specifying a boolean flag to enable
-or disable it. When disabled (the default), pools exhibit the usual Asio
-executor semantics, with no overhead. When enabled, a strand is internally
-created, and cancellation signals are appropriately wired to ensure
-robust thread-safety semantics. The thread sanitizer has helped a lot
-in detecting problems.
-
-I've also removed the `connection_pool::async_get_connection` overloads
-involving timeouts. This is now better handled by `asio::cancel_after`.
-
-With these changes done, `connection_pool` is ready to commit
-for API stability in Boost 1.87.
-
-## pydocca migration
-
-The Boost.JSON author has kindly implemented a new version of the
-`docca` toolchain. It's faster, has fewer dependencies, and
-produces much better output.
-
-I've migrated Boost.MySQL to use it. In the process, I've learnt
-the tool internals, filed a lot of issues that have been promptly
-solved, and in general improved the quality of my reference docs.
-
-I also developed a proof-of-concept Asciidoc generator,
-based on pydocca. You can see an example of the resulting docs
-[here](https://anarthal.github.io/pydocca-asciidoc/mysql/boost/mysql/any_connection/connect.html).
-This has helped me understand the things I don't like
-about the current reference doc templates, so I can provide
-feedback on the new ones currently under development.
-
-## Modular Boost (b2)
-
-Like other libraries, Boost.MySQL has been adapted to use
-the new modular Boost infrastructure. As expected, some things
-broke, but we've managed to bring them up again.
-
-## Other contributions
-
-I have performed some other tasks in Boost.MySQL:
-
-- I've heavily refactored testing infrastructure, with increased support
- to detect non-conformities with the Asio universal model.
-- I've implemented immediate executor support in `connection` and `any_connection`.
-- I've performed the required preparation for the 1.86 Boost release.
-- I've fixed a number of other small issues.
-
-I've managed to perform small contributions to other Boost libraries, including
-Boost.Process, Boost.Redis and Boost.Pfr.
diff --git a/_posts/2024-10-21-SamsQ3Update.md b/_posts/2024-10-21-SamsQ3Update.md
deleted file mode 100644
index c38743490..000000000
--- a/_posts/2024-10-21-SamsQ3Update.md
+++ /dev/null
@@ -1,68 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: sam
-title: Cloud and Infrastructure Update Q3 2024
-author-id: sam
----
-
-### Boost release process boostorg/release-tools
-
-In the previous quarter, publish_release.py included features to support the Fastly CDN at archives.boost.io. This quarter, that functionality was put into action with the release of Boost 1.86.0, and it was a chance to fine-tune and improve the script. More error checking. Adding a preflight phase to test SSH. Adjusting the publish_release.py script to stage windows executables for Tom Kent, so they are relocated to a publicly visible folder during a release.
-Generally the file upload target is AWS S3, and from there the CDN origin servers download the archives.
-Revamping build_docs scripts: add python venv to mac and windows. Support macos-14.
-Briefly investigating docca - python issue that affected boost builds.
-Deployed 24.04 version of docker image for the main boostorg/boost jobs. Fixed zip and 7z failures appearing on 24.04.
-
-### Boost website boostorg/website-v2
-
-Composed a cost inventory spreadsheet of the new infrastructure. Debugging an outage of the site that was traced back to redis -> django-health-check -> celery. Frank Wiles from Revsys ultimately solved this puzzle by adjusting a celery configuration variable.
-
-Wrote local website development bootstrap scripts that will install all prerequisites for local development and then even launch docker-compose. Versions for mac, windows, linux. Updated the corresponding documentation, such that it's equally feasible to go through the steps manually to see what's being installed and then launch docker-compose from the command-line.
-
-Researched how to selectively purge the Fastly CDN cache - and specifically applying that to /release/ on boost.io.
-
-### Mailman project
-
-Revising runbook (steps to go-live in the future).
-Incorporated Greg's changes to install a boost.io header on the mailing lists. Reduced and consolidated those files. Created ansible deployment for that feature. Now temporarily removing customizations before deployment. They may be returned later.
-Upgraded the operating system on all mm3 test instances. Tested. Switched the search engine from Elastic Search to Xapian, which is better supported in terms of the Django modules.
-Submitted upstream pull requests (merged) which
-- document an improved Xapian installation method
-- further document how the core and web components interact in terms of the db
-
-### Slack
-
-Discussing slackbot implementations with Kenneth.
-
-### wowbagger
-
-Documented and published issues about the various problems on the legacy web server.
-Contributed to Kenneth's docker-compose strategy for the original boost.org website to allow local development via docker-compose, by downloading boost archives so the local environment is functional.
-Worked with Rob to include Plausible analytics on boost.org.
-
-### Jenkins
-
-Investigate/repair doc builds of mrdocs, http_proto.
-Modify doc builds of beast, url.
-Generated PR doc builds of safe-cpp. Added a GHA step to render the html upon commit.
-Upgraded the Jenkins executable, and plugins.
-Set up previews of boostlook. master/develop and PRs.
-
-### JSON Benchmarks
-
-After experimenting on a Hetzner server, switched JSON Benchmarks to a new Xeon processor from KnownHost. Intel core processors are aimed at the consumer market while Xeon is a server architecture and is more consistent when running benchmark tests. Replaced Jenkins runner, canceled previous server.
-
-### GHA
-
-Debugging certain boost library jobs. Also, with the hosted runners, determined there was a systematic problem that the boot process was timing out too quickly. Adjusted terraform settings and redeployed. Would be good to propose a PR upstream to terraform: the timeout is too fast.
-Enabled billing for math-cuda gpu tests.
-Macminivault billing issues.
-Resizing terraform runner Windows 2022, to add 30GB more disk space, and Pagefile (memory).
-Built new Ubuntu runners, newer kernel, adjusting OS versions on boostorg/unordered GHA to fix sanitizers.
-
-### Drone
-
-Assisted developers in debugging jobs.
-Scripted docker image cleanup on drone instances.
-Installed a cron job to clear the autoscaler, solving an issue that occasionally jobs get stuck in pending mode, preventing the instances from scaling.
diff --git a/_posts/2024-10-25-FernandoQ3Update.md b/_posts/2024-10-25-FernandoQ3Update.md
deleted file mode 100644
index 668d42365..000000000
--- a/_posts/2024-10-25-FernandoQ3Update.md
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: fernando
-title: Fernando's Q3 2024 Update
-author-id: fernando
----
-
-During this quarter, I have continued to dedicate my efforts to the development of MrDocs, a tool aimed at revolutionizing the generation of reference documentation from C++ code and javadoc comments. My focus has been on expanding its capabilities to solidify its position as the future of documentation in C++.
-
-### Advances in MrDocs Development
-
-This period has witnessed several significant improvements aimed at positioning MrDocs as a leading tool in documentation generation for C++. My key contributions include:
-
-- **Tagfiles Generation**: Implementation of tagfiles generation to facilitate cross-referencing in Doxygen format, significantly improving MrDocs' integration capability with other documentation systems.
-- **Improvements in Error and Warning Presentation**: Optimization of how MrDocs presents errors and warnings to the user, thereby enhancing the tool's usability and facilitating the resolution of issues within documented projects.
-- **Template Optimization**: Work on general improvements to AsciiDoc templates and continuous efforts to keep HTML templates aligned with them, ensuring consistency and quality in documentation outputs.
-
-These initiatives are crucial for establishing MrDocs as an advanced solution in the technical documentation field and further strengthening its competitive position in the market.
-
-### Reflections on Remote Collaboration and Open Source Contribution
-
-Remote collaboration on these projects, across different time zones, continues to present unique challenges and valuable opportunities. The collaborative nature of open-source work with the C++ Alliance enriches my professional experience and significantly contributes to my personal development.
-
-### Looking Forward
-
-I am committed to deepening my involvement in the C++ ecosystem through the C++ Alliance. In the coming months, I aspire to take on the maintenance of Boost libraries that align with my areas of expertise: numerics, cryptography, algorithms, and data structures. I firmly believe that we can revitalize Boost and reaffirm it as an indispensable tool for C++ programmers around the world.
-
-The C++ Alliance is doing an exceptional job of providing support and creating opportunities for Boost to continue evolving. I am excited to be part of the initiatives of the C++ Alliance, whose direction and support have been fundamental in revitalizing Boost. With the momentum of the alliance, we can proclaim: **Make Boost Great Again!** This achievement is not just a testament to individual commitment but a manifestation of the effective leadership and strategic vision of the C++ Alliance, which elevates the entire C++ programming ecosystem.
-
-### Acknowledgments
-
-I want to express my profound gratitude to the C++ Alliance for giving me the opportunity to significantly contribute to the development of Boost. The alliance's dedication to propelling Boost not only revitalizes this key tool but also strengthens the entire C++ ecosystem. Their steadfast support is crucial for Boost to adapt and overcome the challenges of modern programming, benefiting millions of developers around the world. I am excited about the future innovations we can achieve together on this journey towards continuous improvement and programming excellence.
-
-![Make Boost Great Again!](/images/posts/fernando/mbga-cap.png)
\ No newline at end of file
diff --git a/_posts/2024-10-25-KrystiansQ3Update.md b/_posts/2024-10-25-KrystiansQ3Update.md
deleted file mode 100644
index e9d6cbd86..000000000
--- a/_posts/2024-10-25-KrystiansQ3Update.md
+++ /dev/null
@@ -1,60 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: krystian
-title: Krystian's Q3 2024 Update
-author-id: krystian
----
-
-Throughout Q3 2024, my work was primarily focused on two projects: MrDocs, and Clang.
-
-# MrDocs
-
-Most of my work in MrDocs was centered around fixing bugs and refactoring. To that end, I resolved numerous bugs, mostly relating to AST extraction. On the refactoring side of things, I (finally) removed the bitcode serialization component from MrDocs. This _greatly_ simplifies the project architecture and eliminates most of the boilerplate that was needed when modifying the representation used by MrDocs to represent AST nodes.
-
-## Supporting Concepts and Constraints
-
-In addition to housekeeping tasks, I added support for concepts (and constraints) to MrDocs! Although the implementation is still in its infancy, all kinds of possible constraints are supported.
-
-MrDocs relies on the Clang USR (universal symbol reference) generator to create unique identifiers for C++ entities. Since the Clang USR generator does not support constrained declarations, the implementation of concepts requires additional data appended so as to uniquely identify declarations which only differ in constraints. For example:
-
-```cpp
-template <int N>
-void f() requires (N <= 4);
-
-template <int N>
-void f() requires (N > 4);
-```
-
-In the interest of saving time, the "extra data" appended to the USR is obtained by computing the "ODR hash" of the _constraint-expressions_ from each function. For context, an "ODR hash" is a hash value used by Clang to identify ODR violations when using modules. Despite it working for trivial cases like the one above, relying on the ODR hash results in more problems, leading us to what I've been working on in Clang.
-
-# Clang
-
-In Clang, template parameters are identified via *depth* and *index*. Across redeclarations, the depth of template parameters may vary. Consider the following:
-
-```cpp
-template <typename T>
-struct A
-{
-    template <bool U>
-    void f() requires U; // #1
-};
-
-template<>
-template <bool U>
-void A<int>::f() requires U; // #2
-```
-
-In `#1`, the depth of `U` is `1`. In `#2`. the depth of `U` is `0`. If we compared the _constraint-expressions_ of the trailing _requires-clauses_ of `#1` and `#2` as written, we would deem them not equivalent, which is obviously incorrect! So, before we compare the _constraint-expressions_, we must first adjust the depths of any referenced template parameters (which Clang already does). Let's see what happens when we compile with Clang 19:
-
-> error: out-of-line declaration of 'f' does not match any declaration in 'A'
-
-.. it doesn't work! This begs the question... why doesn't it work?
-
-
-In C++, there are three constructs for which instantiation is deferred:
-- _noexcept-specifiers_,
-- default arguments, and
-- constraints
-
-Therefore, we must know the template arguments of any enclosing templates when substituting into these constructs. Of these, the most problematic is constraints, as they affect declaration matching. This led me to [write a patch](https://github.com/llvm/llvm-project/pull/106585) which ensures Clang will always collect the right set of template arguments for any enclosing templates. This not only resolved the USR generation problems in MrDocs, but also fixed a significant number of declaration matching issues in Clang.
diff --git a/_posts/2024-10-25-MohammadsQ3Update.md b/_posts/2024-10-25-MohammadsQ3Update.md
deleted file mode 100644
index 4ed17b436..000000000
--- a/_posts/2024-10-25-MohammadsQ3Update.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: mohammad
-title: Boost.Http.Proto Project Highlights
-author-id: mohammad
----
-
-Here’s a look at some recent projects I've been focusing on:
-
-### Boost.Http.Proto
-
-#### Parsing Chunked Bodies
-
-The `http_proto::parser` uses a circular buffer internally, which sometimes causes HTTP message bodies to span across two buffers. Previously, this required copying data into a temporary buffer for chunked parsing, ensuring continuous memory access. To address this, I introduced a `chained_sequence` abstraction, which lets two buffers appear as a single, contiguous buffer without the need for copying. This approach streamlines the parser implementation and improves efficiency by reducing memory operations. Iterating over `chained_sequence` is nearly as fast as iterating over a single range because it requires only a single comparison per iteration.
-
-#### gzip/deflate Support
-
-One goal for `Http.Proto` is to offer optional support for compression algorithms like gzip, deflate, and brotli, keeping dependencies optional. This allows flexibility, as users may not need compression or might lack libraries like Zlib on their platform. To enable this, we introduced an optional Zlib interface within `http_proto`, allowing gzip/deflate support in `http_proto::parser` without mandatory linking. Now, the parser can read the `Content-Encoding` field and apply the necessary decoding if a suitable compression service is available.
-
-### Boost.Http.Io
-
-Following updates in `Http.Proto`, I refactored the [C++20 coroutine client example](https://github.com/cppalliance/http_io/blob/develop/example/client/flex_await) in `Http.Io`. The client now requests with `Accept-Encoding: gzip, deflate` and decodes responses accordingly. It also includes basic redirect support and streams the response body piece by piece to standard output, allowing it to handle large file downloads.
-
-### Boost.Beast
-
-Alongside routine bug fixes and responses to user-reported issues, here are a few notable changes in the Boost.Beast project:
-
-- **`beast::basic_parser` Enhancement**: A new `trailer_fields` state was added for parsing trailer fields in chunked HTTP message bodies. This state respects user-defined `header_limit` settings and provides appropriate error messages when limits are exceeded.
-- **Error Handling in `basic_fields`**: `basic_fields` interfaces now include an overload that accepts a `system::error_code` instead of throwing exceptions. This enables parsers to report insertion errors directly to the caller.
-- **`skip_` Variable Removal**: The parser previously used a state variable `skip_` to track parsed characters in `CRLF` processing within chunk headers. Benchmarks showed that removing `skip_` improves performance, as the parser can find `CRLF` directly within the buffer. This change has also simplified the parser's implementation.
-- **Forward-Declared Headers for `beast::http`**: New forward-declared headers are now available for all types in the `beast::http` namespace, enabling users to separate implementation and usage sites more effectively by including only the necessary `*_fwd` headers.
diff --git a/_posts/2024-10-25-PeterTurcan-Q3-2024.md b/_posts/2024-10-25-PeterTurcan-Q3-2024.md
deleted file mode 100644
index cff1ae8fd..000000000
--- a/_posts/2024-10-25-PeterTurcan-Q3-2024.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: peter
-title: Peter Turcan Q3 2024 - From Murky to Clear - Shedding Light on the Foggy Bits – User Guide and Contributor Guide Status Update
-author-id: peter
-author-name: Peter Turcan
----
-
-- Authored and published a new topic on Debug Visualizers in the Contributor Guide. Debug Visualizers offer a powerful way to simplify the debugging process by allowing developers to see complex data structures in a more understandable format. The two main sections are **Debug Visualizers in MSVC** (Microsoft's built in visualizers for Visual Studio) - utilizing the Natvis display language, and **Debug Visualizers in GDB** (the visualizers for the GNU Debugger) - utilizing Python templates to create _pretty-printers_. To quote the article _"Debug Visualizers are particularly useful in scenarios where data structures are complex and difficult to interpret from raw memory views."_ The article includes sample code for visualizing simpler libraries like **boost::optional**, and more challenging scenarios from **boost::asio**. Whether you’re new to debugging or an experienced developer, taking the time to master these tools will pay off big-time in the long run!
-
-- At the other end of the technical spectrum from Debug Visualizers, I added a User Guide Glossary, with 140 terms, mostly acronyms found in our Slack channel conversations. This task came from a user who became befuddled over some of the terms we use freely in our Slack conversations, but are not always widely understood. These acronyms vary from the conversational "AFAIK" (as far as I know) through to technical shorthand, such as "XSS" (cross-side scripting, a security vulnerability), through to some of the esoteric Boost terms, such as "uBlas" for Basic Linear Algebra.
-
-- FAQs are often frequently visited web pages, and I added a section on the Standard Library to the User Guide FAQ. There is often some confusion over whether to use a Boost or Standard library. This FAQ section does not put this to rest indefinitely, as both library collections evolve, but should help describe the issues and what to consider when deciding between a Standard or Boost library.
-
-- On a similar vein, updated various other sections of the User Guide FAQ, and Contributor Guide FAQ, as questions and answers become available following Slack or email conversations on the various Boost channels and forums.
-
-- As the size of the User Guide and Contributor Guide grow, navigation also grows in importance - especially with long topics. Adding better linking and table of contents and See Also sections helps improve the user experience.
-
-- As C++ faces some challenges due to its lack of memory safety, such as the language RUST, it was educational for me to read and provide feedback on a Safe C++ paper due to be presented at a conference. Safe C++ is a complex and detailed proposal to right what I see as a historical wrong (though other developers might describe it as "freedom") and address the memory, type, and exception safety issues that have inadvertently enabled security threats.
-
diff --git a/_posts/2024-10-25-dmitrys-q3-update.md b/_posts/2024-10-25-dmitrys-q3-update.md
deleted file mode 100644
index 05a7170a6..000000000
--- a/_posts/2024-10-25-dmitrys-q3-update.md
+++ /dev/null
@@ -1,102 +0,0 @@
----
-layout: post
-nav-class: dark
-categories: dmitry
-title: How to Get More Utility from the Debugger in CI
-author-id: dmitry
-author-name: Dmitry Arkhipov
----
-
-While some of my work in the third quarter of this year was dedicated to more
-work on Boost.JSON and Docca, the most interesting thing was definitely
-[pretty_printers](https://github.com/cppalliance/pretty_printers), a collection
-of utilities and build scripts which help dealing with debugger pretty printers
-and visualisers. Although currently it only supports
-[GDB](https://www.sourceware.org/gdb/), I'm planning to research
-[LLDB](https://lldb.llvm.org/) and
-[Natvis](https://learn.microsoft.com/en-us/visualstudio/debugger/create-custom-views-of-native-objects?view=vs-2022)
-integration too.
-
-The module naturally emerged from my work on GDB pretty printers for
-Boost.JSON. Even if you don't know what pretty printers are, you can probably
-guess just by their name: they are helpers that tell the debugger how to output
-objects of a particular type. These days standard libraries come with such
-helpers, and so when we try printing a container, we get useful information
-instead of unintelligible gibberish. If we provide similar helpers for our
-libraries we can significantly improve debugging experience for our users.
-
-But writing the helpers is only one half of the task. The other half is getting
-the debugger to actually load them. Let's look at the options GDB provides us
-for this.
-
-1. The user can manually load an extension that contains our helpers from the
- initialisation file.
-2. The debugger can automatically load the extension that matches the name of a
- binary that it loads (either a program or a shared library).
-3. The debugger can load the extension from a special section in the loaded
- binary itself.
-
-Option 1 is the most straightforward, and is also the least exciting. Option 2
-is actually the one standard libraries go for. But there is a fundamental
-problem with it: it doesn't work for static libraries let alone header-only
-ones. A static library is never a binary loaded by the debugger, and the
-extension file name has to match the name of a loaded binary. Header-only
-libraries don't have a corresponding binary at all. The reason it works so well
-for standard libraries is that people very rarely link to them statically when
-they are actually working on their code, which is when they use a debugger.
-
-This leaves option 3: putting the extension into the binary. GDB documentation
-[explains how to do it](https://sourceware.org/gdb/current/onlinedocs/gdb.html/dotdebug_005fgdb_005fscripts-section.html).
-The catch is that the extension file needs to be preprocessed to effectively
-become an assembler command. This can be automated, though. In August Niall
-Douglas [posted on the Boost developers' mailing list](https://lists.boost.org/Archives/boost/2024/08/257480.php)
-about his and Braden Ganetsky's work on a script that does such preprocessing
-of a GDB extension file for his library. At that point I have experimented a
-little bit with such embedding and concluded that this is as good as it gets
-with pretty printers deployment. So, the first component of `pretty_printers`
-is a script that takes a GDB Python extension file and produces a C file
-suitable for embedding into a binary.
-
-But that's not all. In the same mailing list post Niall mentions that the
-reason Braden has collaborated with him was bugs he found in the embedding.
-This leads us to testing. Boost.JSON is quite rigorously tested. This has been
-made possible largely thanks to the C++ Alliance Drone instance. After I
-initially wrote GDB pretty printers for Boost.JSON I immediately started
-looking for a way to test them. The aforementioned mailing list post shows that
-my concern wasn't a purely theoretical one.
-
-After some research I discovered that with certain flags GDB can be run as a
-Python interpreter. Hence my original idea for testing pretty printers: a C++
-program that sets up objects to print, and an accompanying Python script that
-tells GDB where to set breakpoints and what expressions to print, and compares
-the output with the expected strings. But I realised that keeping the two files
-in sync becomes rather unwieldy very quickly. That led to take 2: put the tests
-in the comments of the C++ test program, and generate a corresponding Python
-script from it. Not only it resulted in the tests immediately following the
-lines creating the objects used in those tests, it also allowed the support for
-putting tests in functions, loops, and spreading them across multiple files.
-The utility that generates such Python script is the second component of
-`pretty_printers`.
-
-This concludes the story of the two utilities contained in `pretty_printers`.
-The other important component of this module is the support for CMake and B2
-build systems. The support doesn't simply include some boilerplate. The testing
-function also tells GDB to whitelist the directory where the tested binary is
-located, so that the extensions are loaded. Otherwise the user would have to do
-it manually, which is particularly annoying in CI.
-
-After I finished the work on the module, I decided that other libraries could
-benefit from it. It was suggested to me that I should submit the module for
-review to the Boost community. Previously there hasn't been any Boost tool
-reviews, but the Boost community was positive to the idea. I find this to be
-a very exciting development.
-
-Another exciting idea I had is to research other potential debugger helpers,
-unrelated to pretty printing and visualisation. For example, GDB allows
-extensions to register custom commands. There's also the possibility of
-orchestrating GDB to analyse a specific situation. E.g. put a breakpoint on
-this line, but only after another line was hit. While locally this is easily
-done manually, such functionality can be useful when the error only manifests
-on a platform you only have access to in CI. Such ideas hint that
-`pretty_printers` is a misnomer, and the module should be called something
-different. Maybe `debugger_utils`?
diff --git a/_sass/base/config.scss b/_sass/base/config.scss
deleted file mode 100644
index 575b1886b..000000000
--- a/_sass/base/config.scss
+++ /dev/null
@@ -1,85 +0,0 @@
-// Fonts
-$font-family--roboto : "Roboto", sans-serif;
-
-$font-size--primary : 16px;
-
-// General Colors
-$color--black : #000000;
-$color--faded-black : rgba(25,25,25,0.7);
-$color--white : #FFFFFF;
-$color--gold : #C38750;
-$color--light : #F7F7F7;
-$color--gray : #ECECEC;
-$color--stone : #9A9A9A;
-$color--gray-dark : #242424;
-$color--slate : #E1DFDF;
-
-// Transitions
-$transition--primary : all 0.3s ease;
-$transition--secondary : all 0.4s cubic-bezier(0.2, 1, 0.3, 1);
-$transition-fast : all 0.2s ease;
-$transition-slow : all 0.8s ease;
-
-// Responsive
-$phone-small : 320px;
-$phone-medium : 480px;
-$phone-large : 512px;
-$phone-xlarge : 640px;
-$tablet : 768px;
-$desktop-small : 1024px;
-$desktop-medium : 1280px;
-$desktop-large : 1440px;
-$mobile : $desktop-small;
-
-// General dimensions and spacing
-$spacing--xxs : 5px;
-$spacing--xs : 10px;
-$spacing--s : 15px;
-$spacing--m : 20px;
-$spacing--l : 25px;
-$spacing--xl : 30px;
-$spacing--xxl : 45px;
-
-// Z-index
-$z-below-bottom : -1;
-$z-bottom : 0;
-$z-middle-bottom : 5;
-$z-middle : 10;
-$z-middle-top : 15;
-$z-top : 1000;
-$z-above-top : 1005;
-
-$primary-color: #6f777d !default;
-$gray: #7a8288 !default;
-$dark-gray: mix(#000, $gray, 50%) !default;
-$darker-gray: mix(#000, $gray, 60%) !default;
-$light-gray: mix(#fff, $gray, 50%) !default;
-$lighter-gray: mix(#fff, $gray, 90%) !default;
-$border-color: $lighter-gray !default;
-$info-color: #3b9cba !default;
-
-/* links */
-$link-color: mix(#000, $info-color, 20%) !default;
-$link-color-hover: mix(#000, $link-color, 25%) !default;
-$link-color-visited: mix(#fff, $link-color, 15%) !default;
-
-/* YIQ color contrast */
-$yiq-contrasted-dark-default: $dark-gray !default;
-$yiq-contrasted-light-default: #fff !default;
-$yiq-contrasted-threshold: 175 !default;
-$yiq-debug: false !default;
-
-$text-color: $dark-gray !default;
-$muted-text-color: mix(#fff, $text-color, 20%) !default;
-
-$border-radius: 4px !default;
-
-/* type scale */
-$type-size-1: 2.441em !default; // ~39.056px
-$type-size-2: 1.953em !default; // ~31.248px
-$type-size-3: 1.563em !default; // ~25.008px
-$type-size-4: 1.25em !default; // ~20px
-$type-size-5: 1em !default; // ~16px
-$type-size-6: 0.75em !default; // ~12px
-$type-size-7: 0.6875em !default; // ~11px
-$type-size-8: 0.625em !default; // ~10px
\ No newline at end of file
diff --git a/_sass/base/functions.scss b/_sass/base/functions.scss
deleted file mode 100644
index 21c8aa582..000000000
--- a/_sass/base/functions.scss
+++ /dev/null
@@ -1,63 +0,0 @@
-// Mixins
-@mixin breakpoint($param1, $param2: false) {
-
- $declaration: "only screen and (min-width: #{$param1})";
-
- @if ($param2 != false) {
- $declaration: "only screen and (min-width: #{$param1}) and (max-width: #{$param2})";
- }
-
- @media #{$declaration} {
- @content;
- }
-
-}
-
-@mixin clearfix {
- clear: both;
-
- &::after {
- clear: both;
- content: "";
- display: table;
- }
-}
-
-/*
- Compass YIQ Color Contrast
- https://github.com/easy-designs/yiq-color-contrast
- ========================================================================== */
-
-@function yiq-is-light(
- $color,
- $threshold: $yiq-contrasted-threshold
-) {
- $red: red($color);
- $green: green($color);
- $blue: blue($color);
-
- $yiq: (($red*299)+($green*587)+($blue*114))/1000;
-
- @if $yiq-debug { @debug $yiq, $threshold; }
-
- @return if($yiq >= $threshold, true, false);
-}
-
-@function yiq-contrast-color(
- $color,
- $dark: $yiq-contrasted-dark-default,
- $light: $yiq-contrasted-light-default,
- $threshold: $yiq-contrasted-threshold
-) {
- @return if(yiq-is-light($color, $threshold), $yiq-contrasted-dark-default, $yiq-contrasted-light-default);
-}
-
-@mixin yiq-contrasted(
- $background-color,
- $dark: $yiq-contrasted-dark-default,
- $light: $yiq-contrasted-light-default,
- $threshold: $yiq-contrasted-threshold
-) {
- background-color: $background-color;
- color: yiq-contrast-color($background-color, $dark, $light, $threshold);
-}
\ No newline at end of file
diff --git a/_sass/base/print.scss b/_sass/base/print.scss
deleted file mode 100644
index 5f2cf0147..000000000
--- a/_sass/base/print.scss
+++ /dev/null
@@ -1,5 +0,0 @@
-@media print {
- nav.nav, section#news, footer.footer {
- display: none;
- }
-}
\ No newline at end of file
diff --git a/_sass/base/reset.scss b/_sass/base/reset.scss
deleted file mode 100644
index e9695fce9..000000000
--- a/_sass/base/reset.scss
+++ /dev/null
@@ -1,56 +0,0 @@
-html, body, div, span, applet, object, iframe,
-h1, h2, h3, h4, h5, h6, p, figure, blockquote, pre,
-a, abbr, acronym, address, big, cite, code,
-del, dfn, em, font, ins, kbd, q, s, samp,
-small, strike, strong, sub, sup, tt, var,
-dl, dt, dd, ol, ul, li,
-fieldset, form, label, legend,
-table, caption, tbody, tfoot, thead, tr, th, td {
- border: 0;
- font-family: inherit;
- font-size: 100%;
- font-style: inherit;
- font-weight: inherit;
- margin: 0;
- outline: 0;
- padding: 0;
- vertical-align: baseline;
-}
-:focus {
- outline: 0;
-}
-body {
- background: #FFF;
- line-height: 1;
-}
-ol,
-ul {
- list-style: none;
-}
-table {
- border-collapse: separate;
- border-spacing: 0;
-}
-caption,
-th,
-td {
- font-weight: normal;
- text-align: left;
-}
-blockquote:before,
-blockquote:after,
-q:before,
-q:after {
- content: "";
-}
-blockquote,
-q {
- quotes: "" "";
-}
-a img {
- border: 0;
-}
-article, aside, details, figcaption, figure,
-footer, header, hgroup, menu, nav, section {
- display: block;
-}
\ No newline at end of file
diff --git a/_sass/base/text.scss b/_sass/base/text.scss
deleted file mode 100644
index b1f7a1adb..000000000
--- a/_sass/base/text.scss
+++ /dev/null
@@ -1,227 +0,0 @@
-// General
-html {
- font-size: $font-size--primary;
-}
-
-body {
- background: $color--white;
- color: $color--black;
- font-family: $font-family--roboto;
- font-size: $font-size--primary;
- font-weight: 400;
- margin: 0;
- overflow-x: hidden;
- -webkit-font-smoothing: antialiased;
- -moz-osx-font-smoothing: grayscale;
-}
-
-img {
- max-width: 100%;
-}
-
-
-// Base Text Styles
-
-.text-xxl { /* hero main header */
- font-family: $font-family--roboto;
- font-weight: 700;
- color: $color--gold;
- line-height: 1;
- letter-spacing: 0.02em;
- font-size: 2.5rem; /* 40px */
-
- @include breakpoint($desktop-small) {
- font-size: 5rem; /* 80px */
- }
-
- @include breakpoint($desktop-large) {
- font-size: 7.5rem; /* 120px */
- }
-}
-
-.text-xl { /* hero subheader, section header */
- font-family: $font-family--roboto;
- font-weight: 700;
- color: $color--black;
- line-height: 1;
- letter-spacing: 0.02em;
- font-size: 1.875rem; /* 30px */
-
- @include breakpoint($desktop-small) {
- font-size: 2.5rem; /* 40px */
- }
-
- @include breakpoint($desktop-large) {
- font-size: 3.75rem; /* 60px */
- }
-}
-
-.text-l { /* link lists */
- font-family: $font-family--roboto;
- font-weight: 700;
- color: $color--black;
- line-height: 1.33;
- letter-spacing: 0.02em;
- font-size: 1.25rem; /* 20px */
-
- @include breakpoint($desktop-large) {
- font-size: 2.063rem; /* 33px */
- }
-}
-
-.text-m { /* section subheader */
- font-family: $font-family--roboto;
- font-weight: 700;
- color: $color--black;
- line-height: 1.2;
- letter-spacing: 0.02em;
- font-size: 1.25rem; /* 20px */
-
- @include breakpoint($desktop-large) {
- font-size: 1.875rem; /* 30px */
- }
-}
-
-.text-s { /* dates */
- font-family: $font-family--roboto;
- font-weight: 400;
- color: $color--gold;
- line-height: 1.7;
- letter-spacing: 0.02em;
- font-size: 0.875rem; /* 14px */
-
- @include breakpoint($desktop-large) {
- font-size: 1.625rem; /* 26px */
- }
-}
-
-.text-xs { /* team member titles */
- font-family: $font-family--roboto;
- font-weight: 400;
- color: $color--gold;
- line-height: 1;
- letter-spacing: 0.02em;
- font-size: 0.875rem; /* 14px */
-
- @include breakpoint($desktop-large) {
- font-size: 1.5rem; /* 24px */
- }
-}
-
-.text-xxs { /* paragraphs */
- font-family: $font-family--roboto;
- font-weight: 400;
- color: $color--black;
- line-height: 1.25;
- letter-spacing: 0.02em;
- font-size: 0.875rem; /* 14px */
-
- @include breakpoint($desktop-small) {
- font-size: 0.9rem;
- }
-
- @include breakpoint($desktop-large) {
- line-height: 1.5;
- font-size: 1rem; /* 16px */
- }
-
- a {
- font-weight: 700;
- text-decoration: underline;
- color: $color--black;
- }
-}
-
-
-// Specialty Text Styles
-
-.header-shadow { /* faded section header */
- font-family: $font-family--roboto;
- font-weight: 700;
- color: $color--light;
- line-height: 1;
- letter-spacing: 0.02em;
- font-size: 3.625rem; /* 58px */
-
- &.dark {
- color: $color--gray-dark;
- }
-
- @include breakpoint($desktop-small) {
- font-size: 5.813rem; /* 93px */
- }
-
- @include breakpoint($desktop-large) {
- font-size: 8.75rem; /* 140px */
- }
-}
-
-.all {
- font-size: 0.875rem;
-
- @include breakpoint($desktop-small) {
- font-size: 0.9rem;
- }
-
- @include breakpoint($desktop-large) {
- font-size: 1rem;
- }
-}
-
-.formatted-text {
- @include breakpoint($desktop-small) {
- margin-bottom: $spacing--m;
- }
-
- p,
- li {
- margin-bottom: $spacing--s;
-
- @include breakpoint($desktop-small) {
- margin-bottom: $spacing--s;
- }
-
- &:last-child {
- margin-bottom: 0;
- }
- }
-
- li {
- position: relative;
- padding-left: $spacing--l;
-
- @include breakpoint($desktop-large) {
- padding-left: 35px;
- }
- }
-
- li:before {
- content: '•';
- color: $color--gold;
- font-size: 2em;
- position: absolute;
- top: 9px;
- left: 0;
- line-height: 0;
-
- @include breakpoint($desktop-small) {
- top: 8px;
- }
-
- @include breakpoint($desktop-large) {
- top: 12px;
- }
- }
-}
-
-a {
- text-decoration: none;
-}
-
-.link {
- text-decoration: underline;
-}
-
-strong {
- font-weight: 700;
-}
diff --git a/_sass/modules/card.scss b/_sass/modules/card.scss
deleted file mode 100644
index dcd1d9a4f..000000000
--- a/_sass/modules/card.scss
+++ /dev/null
@@ -1,39 +0,0 @@
-.card {
- display: block;
-
- .card-img-wrapper {
- width: 100%;
- padding-bottom: 84%;
- position: relative;
- }
-
- .card-img {
- object-fit: cover;
- position: absolute;
- height: 100%;
- width: 100%;
- }
-
- .card-text {
- height: 95px;
- width: 100%;
- background: $color--white;
- display: flex;
- align-items: center;
- justify-content: center;
- flex-direction: column;
- margin-top: -$spacing--xxs;
-
- @include breakpoint($desktop-small) {
- height: 105px;
- }
-
- @include breakpoint($desktop-large) {
- height: 140px;
- }
- }
-
- .card-title {
- margin-bottom: $spacing--xxs;
- }
-}
\ No newline at end of file
diff --git a/_sass/modules/footer.scss b/_sass/modules/footer.scss
deleted file mode 100644
index 8829f46b6..000000000
--- a/_sass/modules/footer.scss
+++ /dev/null
@@ -1,59 +0,0 @@
-.footer {
- height: 85px;
- display: flex;
- justify-content: center;
- align-items: center;
- background: $color--black;
- flex-direction: column;
-
- @include breakpoint($desktop-small) {
- height: 130px;
- }
-
- @include breakpoint($desktop-large) {
- height: 200px;
- }
-
- .email {
- vertical-align: middle;
- width: 104px;
-
- @include breakpoint($desktop-small) {
- width: 133px;
- }
- @include breakpoint($desktop-large) {
- width: 157px;
- }
- }
-
- .footer-text,
- a {
- font-size: 0.625rem; /* 10px */
- color: $color--white;
- font-weight: 400;
-
- @include breakpoint($desktop-small) {
- font-size: 0.75rem; /* 12px */
- }
-
- @include breakpoint($desktop-large) {
- font-size: 1rem; /* 12px */
- }
- }
-
- .footer-text {
- text-align: center;
- }
-
- a {
- font-weight: 700;
- }
-
- .line {
- display: block;
-
- @include breakpoint($desktop-small) {
- display: inline-block;
- }
- }
-}
diff --git a/_sass/modules/hero.scss b/_sass/modules/hero.scss
deleted file mode 100644
index 5bf79373c..000000000
--- a/_sass/modules/hero.scss
+++ /dev/null
@@ -1,46 +0,0 @@
-.hero {
- height: calc(100vh);
- width: 100vw;
- background: url('/images/hero-bg-mobile.png') no-repeat center/cover;
- /*background: url('/images/hero-bg-mobile.png') no-repeat center/cover;*/ /* will need to update for prod */
- position: relative;
-
- @include breakpoint($desktop-small) {
- height: 670px;
- background: url('/images/hero-bg.png') no-repeat center/cover;
- /*background: url('/images/hero-bg.png') no-repeat center/cover;*/ /* will need to update for prod */
- }
-
- @include breakpoint($desktop-large) {
- height: 800px;
- }
-
- .titles {
- position: absolute;
- left: 50%;
- top: calc(50% - 25px);
- width: 100%;
- text-align: center;
- transform: translate(-50%, -50%);
-
- @include breakpoint($desktop-small) {
- top: 50%;
- }
- }
-
- .main-header {
- margin-bottom: $spacing--xs;
-
- @include breakpoint($desktop-small) {
- margin-bottom: $spacing--m;
- }
-
- @include breakpoint($desktop-large) {
- margin-bottom: $spacing--xxs;
- }
- }
-
- .sub-header {
- color: $color--white;
- }
-}
\ No newline at end of file
diff --git a/_sass/modules/layout.scss b/_sass/modules/layout.scss
deleted file mode 100644
index aeca44855..000000000
--- a/_sass/modules/layout.scss
+++ /dev/null
@@ -1,357 +0,0 @@
-#body {
- &.locked {
- overflow: hidden;
- }
-}
-
-.content {
- padding: $spacing--s 0;
-
-
- @include breakpoint($desktop-small) {
- padding: $spacing--xxl 0;
- }
-
- @include breakpoint($desktop-large) {
- padding: 60px 0;
- }
-}
-
-.section {
- padding: 50px $spacing--l;
-
-
- @include breakpoint($desktop-small) {
- padding: 60px 100px;
- }
-
- @include breakpoint($desktop-large) {
- padding: 90px 245px;
- }
-
- .section-title {
- position: relative;
- display: inline-block;
- margin-bottom: $spacing--xs;
-
- @include breakpoint($desktop-small) {
- margin-bottom: $spacing--m;
- }
-
- @include breakpoint($desktop-large) {
- margin-bottom: 35px;
- }
- }
-
- .header {
- color: $color--gold;
- position: relative;
- z-index: $z-top;
- margin-left: -1rem;
- margin-right: -1rem;
- padding: .4rem 1rem .1rem;
-
- &:hover {
- a::before {
- visibility: visible;
- }
- }
-
- a {
- position: absolute;
- text-decoration: none;
- width: 1.75ex;
- margin-left: -1.5ex;
- font-size: .8em;
- font-weight: 400;
- padding-top: .05em;
-
- &::before {
- visibility: hidden;
- color: #A68A74;
- content: "\00a7";
- }
- }
- }
-
- .header-shadow {
- position: absolute;
- bottom: 30%;
- left:calc(50% + 30px);
- transform: translateX(-50%);
-
- &.center {
- left: 50%;
- }
- }
-
- .section-subheader {
- margin-bottom: $spacing--s;
-
- @include breakpoint($desktop-small) {
- margin-bottom: $spacing--l;
- }
-
- @include breakpoint($desktop-large) {
- margin-bottom: 35px;
- }
- }
-}
-
-/* for items that must wrap */
-.grid {
- display: flex;
- flex-wrap: wrap;
-}
-
-/* for items that consistently stay in a row */
-.row {
- &.row-sm {
- display: flex;
- }
-
- @include breakpoint($desktop-small) {
- display: flex;
- }
-}
-
-.col-half {
- width: 100%;
-
- @include breakpoint($desktop-small) {
- width: 50%;
- display: inline-block;
- vertical-align: top;
- }
-}
-
-/* 1 column mobile - 2 column tablet - 3 column desktop */
-.col-third {
- margin-bottom: 35px;
- vertical-align: top;
- width: 100%;
-
- @include breakpoint($tablet) {
- width: calc(50% - 17px);
- padding: 0 17px;
- display: inline-block;
-
- &:nth-child(odd) {
- padding-left: 0;
- }
-
- &:nth-child(even) {
- padding-right: 0;
- }
- }
-
- @include breakpoint($desktop-small) {
- width: calc(33.33% - 16px);
- display: inline-block;
- vertical-align: top;
- padding: 0 12px;
- margin-bottom: $spacing--l;
-
- &:nth-child(odd),
- &:nth-child(even) {
- padding: 0 12px;
- }
-
- &:nth-child(3n+1) {
- padding-left: 0;
- }
-
- &:nth-child(3n+3) {
- padding-right: 0;
- }
- }
-
- @include breakpoint($desktop-large) {
- padding: 0 17px;
- margin-bottom: 35px;
- width: calc(33.33% - 23px);
-
- &:nth-child(odd),
- &:nth-child(even) {
- padding: 0 17px;
- }
-
- &:nth-child(3n+1) {
- padding-left: 0;
- }
-
- &:nth-child(3n+3) {
- padding-right: 0;
- }
- }
-}
-
-.col-fourth {
- width: 100%;
-
- &.col-fourth-sm {
- width: 25%;
- display: inline-block;
- vertical-align: top;
-
- &:first-child {
- padding-left: 0;
- }
-
- &:last-child {
- padding-right: 0;
- }
- }
-
- @include breakpoint($tablet) {
- width: calc(50% - 20px);
- display: inline-block;
- padding: 0 $spacing--m;
-
- &:first-child,
- &:nth-child(3) {
- padding-left: 0;
- }
-
- &:nth-child(2),
- &:last-child {
- padding-right: 0;
- }
- }
-
- @include breakpoint($desktop-small) {
- width: calc(25% - 83px);
- display: inline-block;
- vertical-align: top;
- padding: 0 55px;
-
- &:nth-child(2),
- &:nth-child(3) {
- padding: 0 55px;
- }
-
- &:first-child {
- padding-left: 0;
- }
-
- &:last-child {
- padding-right: 0;
- }
- }
-}
-
-.generated-content {
- h1 {
- font-weight: 700;
- font-size: 1.25rem; /* 20px */
- padding: 10px 0;
-
- @include breakpoint($desktop-small) {
- font-size: 2rem; /* 32px */
- }
- }
- h2 {
- font-weight: 700;
- font-size: 1.125rem; /* 18px */
- padding: 10px 0;
-
- @include breakpoint($desktop-small) {
- font-size: 1.75rem; /* 28px */
- }
- }
- h3 {
- font-weight: 700;
- font-size: 1rem; /* 16px */
- padding: 10px 0;
-
- @include breakpoint($desktop-large) {
- font-size: 1.625rem; /* 26px */
- }
- }
- h4 {
- font-weight: 700;
- font-size: 1rem; /* 16px */
- padding: 10px 0;
-
- @include breakpoint($desktop-large) {
- font-size: 1.5rem; /* 24px */
- }
- }
- h5 {
- font-weight: 700;
- font-size: 0.875rem; /* 14px */
- padding: 10px 0;
-
- @include breakpoint($desktop-large) {
- font-size: 1.375rem; /* 22px */
- }
- }
- h6 {
- font-weight: 700;
- font-size: 0.875rem; /* 14px */
- padding: 10px 0;
-
- @include breakpoint($desktop-large) {
- font-size: 1.25rem; /* 20px */
- }
- }
- p {
- padding: 5px 0;
- line-height: 1.65;
- }
- ul {
- list-style-type: disc;
- list-style-position: inside;
- }
- ol {
- list-style-type: decimal;
- list-style-position: inside;
- li {
- margin: 0.5rem 0;
- }
- }
- em {
- font-weight: 700;
- }
- code {
- font-family: SFMono-Regular,Consolas,Liberation Mono,Menlo,Courier,monospace;
- background-color: rgba(27,31,35,.05);
- border-radius: 3px;
- font-size: 95%;
- margin: 0;
- padding: .2em .4em;
- }
- pre {
- word-wrap: normal;
- display: block;
- margin: 1rem 0;
- padding: 1rem 1rem;
- width: auto;
- overflow: auto;
- font-size: 95%;
- background-color: #f6f8fa;
- border-radius: 4px;
- code {
- font-family: SFMono-Regular,Consolas,Liberation Mono,Menlo,Courier,monospace;
- font-size: 95%;
- white-space: inherit;
- padding: 0;
- background-color: transparent;
- line-height: 2;
- }
- }
-}
-
-.mobile-only {
- @include breakpoint($desktop-small) {
- display: none;
- }
-}
-
-.desktop-only {
- display: none;
-
- @include breakpoint($desktop-small) {
- display: block;
- }
-}
diff --git a/_sass/modules/navigation.scss b/_sass/modules/navigation.scss
deleted file mode 100644
index 8c7077297..000000000
--- a/_sass/modules/navigation.scss
+++ /dev/null
@@ -1,168 +0,0 @@
-.nav {
- height: 100px;
- position: absolute;
- top: 0;
- left: 0;
- width: 100%;
- z-index: $z-above-top;
-
- @include breakpoint($desktop-small) {
- height: 120px;
- }
-
- &.dark {
- position: relative;
- background: $color--black;
- }
-
- .logo {
- width: 77px;
- position: absolute;
- top: 50%;
- transform: translateY(-50%);
- left: $spacing--s;
-
- @include breakpoint($desktop-small) {
- width: 90px;
- left: $spacing--xl;
- }
- }
-
- .hamburger {
- position: absolute;
- top: 50%;
- right: $spacing--l;
- height: 22px;
- width: 30px;
- z-index: $z-above-top;
- transform: translateY(-50%);
-
- @include breakpoint($desktop-small) {
- display: none;
- }
-
- .hamburger-line {
- height: 4px;
- width: 30px;
- background: $color--white;
- position: absolute;
- left: 0;
- transition: $transition--primary;
- opacity: 1;
-
- &:first-child {
- top: 0;
- }
-
- &:nth-child(2) {
- top: 9px;
- }
-
- &:nth-child(3) {
- top: 18px;
- }
- }
-
- &.active {
- .hamburger-line {
- &:first-child {
- transform: rotate(45deg);
- top: 8px;
- }
-
- &:nth-child(2) {
- opacity: 0;
- }
-
- &:nth-child(3) {
- transform: rotate(-45deg);
- top: 8px;
- }
- }
- }
- }
-
- .nav-items {
- position: fixed;
- top: -110vh;
- left: 0;
- height: 100vh;
- width: 100vw;
- background: $color--black;
- padding-top: 35px;
- text-align: center;
- transition: $transition-slow;
- display: none;
- box-sizing: border-box;
-
- &.active {
- top: 0;
- display: block;
- }
-
- @include breakpoint($desktop-small) {
- display: block;
- padding-top: 0;
- position: absolute;
- right: 0;
- top: 50%;
- transform: translateY(-50%);
- left: auto;
- width: auto;
- height: auto;
- background: transparent;
- text-align: right;
-
- &.active {
- top: 40px;
- }
- }
- }
-
- .nav-item {
- padding-top: 35px;
-
- @include breakpoint($desktop-small) {
- padding-top: 0;
- margin-right: 45px;
- display: inline-block;
- }
-
- }
-
- .nav-link { /* nav bar text */
- font-family: $font-family--roboto;
- font-weight: 700;
- color: $color--white;
- line-height: 1;
- font-size: 1.5rem;
- letter-spacing: 0.02em;
-
- @include breakpoint($desktop-small) {
- font-size: 1.25rem; /* 20px */
- }
-
- @include breakpoint($desktop-large) {
- font-size: 1.5rem; /* 24px */
- }
- }
-
- .social-link {
- display: flex;
- justify-content: center;
- }
-
- .socials {
- .connect-content {
- position: absolute;
- bottom: 80px;
- left: 50%;
- transform: translateX(-50%);
- width: 300px;
-
- @include breakpoint($desktop-small) {
- display: none;
- }
- }
- }
-}
\ No newline at end of file
diff --git a/_sass/modules/news.scss b/_sass/modules/news.scss
deleted file mode 100644
index 4199d1706..000000000
--- a/_sass/modules/news.scss
+++ /dev/null
@@ -1,257 +0,0 @@
-.news-container {
- margin-top: $spacing--xs;
-
- @include breakpoint($desktop-small) {
- margin-top: $spacing--l;
- }
- @include breakpoint($desktop-medium) {
- margin-top: 50px;
- }
- @include breakpoint($desktop-large) {
- margin-top: 70px;
- margin-bottom: $spacing--l;
- }
-}
-
-.news-wrapper {
- padding: $spacing--m 0;
-
- // &:last-of-type {
- // padding: $spacing--m 0 $spacing--xs;
- // }
-
- @include breakpoint($desktop-small) {
- padding: $spacing--m 38% $spacing--m 0;
- }
-
- @include breakpoint($desktop-large) {
- padding: $spacing--m 30% $spacing--m 0;
- }
-
-
- .news-date {
- display: block;
- margin-bottom: $spacing--xxs;
-
- @include breakpoint($desktop-large) {
- margin-bottom: $spacing--s;
- }
- }
-
- .news-link {
- margin-top: $spacing--s;
- display: block;
- text-decoration: underline;
-
- @include breakpoint($desktop-small) {
- display: none;
- }
- }
-
- .news-text-wrapper {
-
- @include breakpoint($desktop-small) {
- flex: 1;
- }
- }
-
- .news-title {
- text-decoration: underline;
- margin-bottom: $spacing--s;
- }
-
- .flex-row {
- @include breakpoint($desktop-small) {
- display: flex;
- flex-direction: row;
- align-items: flex-start;
- }
- }
-
- .news-image {
- display: none;
-
- @include breakpoint($desktop-small) {
- display: block;
- width: 130px;
- height: 130px;
- object-fit: cover;
- margin-left: 30px;
- }
-
- @include breakpoint($desktop-large) {
- width: 200px;
- height: 200px;
- }
- }
-
- a {
- color: $color--black;
- }
-}
-
-.news-date {
- margin-bottom: -3px;
-
- @include breakpoint($desktop-small) {
- margin-bottom: 2px;
- }
-}
-
-.news-date,
-.news-title {
- display: block;
-}
-
-.news-list-item {
- margin-bottom: $spacing--l;
-
- @include breakpoint($desktop-small) {
- margin-bottom: 35px;
- }
-
- @include breakpoint($desktop-large) {
- margin-bottom: 40px;
- }
-}
-
-.rss {
- z-index: 10;
- padding-left: 8px;
-}
-
-.icon-size {
- height: 24px;
- width: 24px;
- @include breakpoint($desktop-small) {
- height: 32px;
- width: 32px;
- }
- @include breakpoint($desktop-large) {
- height: 48px;
- width: 48px;
- }
-}
-
-.section-title {
- .header-shadow {
- z-index: -1;
- }
-}
-
-.pagination {
- @include clearfix();
- margin-top: 1em;
- padding-top: 1em;
- width: 100%;
-
- ul {
- margin: 0;
- padding: 0;
- list-style-type: none;
- // font-family: $sans-serif;
- }
-
- li {
- display: block;
- float: left;
- margin-left: -1px;
-
- a {
- display: block;
- margin-bottom: 0.25em;
- padding: 0.2em 0.8em;
- // font-family: $sans-serif;
- font-size: 14px;
- font-weight: bold;
- line-height: 1.5;
- text-align: center;
- text-decoration: none;
- color: $muted-text-color;
- border: 1px solid mix(#000, $border-color, 25%);
- border-radius: 0;
-
- @include breakpoint($phone-large) {
- padding: 0.5em 1em;
- }
-
- &:hover {
- color: $link-color-hover;
- }
-
- &.current,
- &.current.disabled {
- color: #fff;
- background: $primary-color;
- }
-
- &.disabled {
- color: rgba($muted-text-color, 0.5);
- pointer-events: none;
- cursor: not-allowed;
- }
- }
-
- &:first-child {
- margin-left: 0;
-
- a {
- border-top-left-radius: $border-radius;
- border-bottom-left-radius: $border-radius;
- }
- }
-
- &:last-child {
- a {
- border-top-right-radius: $border-radius;
- border-bottom-right-radius: $border-radius;
- }
- }
- }
-
- /* next/previous buttons */
- &--pager {
- display: block;
- padding: 1em 2em;
- float: left;
- width: 50%;
- // font-family: $sans-serif;
- font-size: $type-size-5;
- font-weight: bold;
- text-align: center;
- text-decoration: none;
- color: $muted-text-color;
- border: 1px solid mix(#000, $border-color, 25%);
- border-radius: $border-radius;
-
- &:hover {
- @include yiq-contrasted($muted-text-color);
- }
-
- &:first-child {
- border-top-right-radius: 0;
- border-bottom-right-radius: 0;
- }
-
- &:last-child {
- margin-left: -1px;
- border-top-left-radius: 0;
- border-bottom-left-radius: 0;
- }
-
- &.disabled {
- color: rgba($muted-text-color, 0.5);
- pointer-events: none;
- cursor: not-allowed;
- }
- }
-}
-
-.page__content + .pagination,
-.page__meta + .pagination,
-.page__share + .pagination,
-.page__comments + .pagination {
- margin-top: 2em;
- padding-top: 2em;
- border-top: 1px solid $border-color;
-}
\ No newline at end of file
diff --git a/_sass/modules/slack.scss b/_sass/modules/slack.scss
deleted file mode 100644
index 63c0e223f..000000000
--- a/_sass/modules/slack.scss
+++ /dev/null
@@ -1,33 +0,0 @@
-.slack {
- .slack-link,
- .section-title {
- margin-bottom: $spacing--m;
-
- @include breakpoint($desktop-small) {
- margin-bottom: $spacing--xl;
- }
- }
-
- .slack-paragraph {
- margin-bottom: $spacing--s;
- }
-
- .slack-list-item {
- margin-top: $spacing--l;
- h4, p {
- margin-bottom: $spacing--s;
- }
- }
-
- .header-shadow {
- white-space: nowrap;
- }
-
- .link {
- text-decoration: underline;
- }
-
- em {
- font-style: italic;
- }
-}
diff --git a/_sass/modules/social.scss b/_sass/modules/social.scss
deleted file mode 100644
index ba5814e4c..000000000
--- a/_sass/modules/social.scss
+++ /dev/null
@@ -1,173 +0,0 @@
-.section.connect {
- padding: 55px $spacing--l 50px;
- text-align: center;
- background: url('/images/contact-bg-mobile.png') no-repeat center/cover;
-
- @include breakpoint($desktop-small) {
- background: url('/images/contact-bg.png') no-repeat center/cover;
- height: calc(100vh - 130px);
- padding: 0;
- position: relative;
- }
-
- @include breakpoint($desktop-large) {
- height: calc(100vh - 200px);
- }
-
- .section-content {
- @include breakpoint($desktop-small) {
- position: absolute;
- text-align: center;
- width: 100%;
- top: calc(50% + 25px);
- transform: translateY(-50%);
- }
-
- @include breakpoint($desktop-large) {
- top: calc(50% + 40px);
- }
- }
-
- .section-title {
- margin-bottom: $spacing--m;
-
- @include breakpoint($desktop-small) {
- margin-bottom: 35px;
- }
-
- @include breakpoint($desktop-large) {
- margin-bottom: 60px;
- }
- }
-
- .header-shadow {
- color: $color--white;
- opacity: 0.15;
- }
-}
-
-.socials {
- .connect-content {
- width: 270px;
- margin: 0 auto;
-
- @include breakpoint($desktop-small) {
- width: 780px;
- }
-
- @include breakpoint($desktop-large) {
- width: 1140px;
- }
- }
-
- .col-fourth {
- padding: 0;
- }
-
- .social-icon {
- text-align: center;
- display: flex;
- align-items: center;
- flex-direction: column;
- justify-content: center;
- height: 52px;
- width: 52px;
-
- @include breakpoint($desktop-small) {
- height: 180px;
- width: 180px;
- }
-
- @include breakpoint($desktop-large) {
- height: 270px;
- width: 270px;
- }
- }
-
- .social-icon-img-wrapper {
- height: 25px;
- margin-bottom: $spacing--xxs;
- display: flex;
- align-items: flex-end;
-
- @include breakpoint($desktop-small) {
- height: 85px;
- margin-bottom: $spacing--s;
- }
-
- @include breakpoint($desktop-large) {
- height: 125px;
- margin-bottom: 30px;
- }
- }
-
- .social-icon-img {
- display: block;
-
- &.github {
- width: 27px;
-
- @include breakpoint($desktop-small) {
- width: 90px;
- }
-
- @include breakpoint($desktop-large) {
- width: 135px;
- }
- }
-
- &.facebook {
- width: 13px;
-
- @include breakpoint($desktop-small) {
- width: 45px;
- }
-
- @include breakpoint($desktop-large) {
- width: 63px;
- }
- }
-
- &.twitter {
- width: 23px;
-
- @include breakpoint($desktop-small) {
- width: 80px;
- }
-
- @include breakpoint($desktop-large) {
- width: 120px;
- }
- }
-
- &.linkedin{
- width: 24px;
-
- @include breakpoint($desktop-small) {
- width: 78px;
- }
-
- @include breakpoint($desktop-large) {
- width: 117px;
- }
- }
- }
-
- .social-icon-text {
- font-family: $font-family--roboto;
- font-weight: 700;
- color: $color--white;
- line-height: 1;
- font-size: 0.625rem; /* 10px */
- letter-spacing: 0.02em;
-
- @include breakpoint($desktop-small) {
- font-size: 1.25rem; /* 20px */
- }
-
- @include breakpoint($desktop-large) {
- font-size: 1.875rem; /* 30px */
- }
- }
-}
-
diff --git a/_sass/pages/homepage.scss b/_sass/pages/homepage.scss
deleted file mode 100644
index d388e5c15..000000000
--- a/_sass/pages/homepage.scss
+++ /dev/null
@@ -1,370 +0,0 @@
-.mission {
- .mission-img {
- width: 100%;
- margin: $spacing--s auto 0;
- max-width: 600px;
- display: block;
-
- @include breakpoint($desktop-small) {
- margin: 0;
- position: relative;
- top: 90px;
- }
-
- @include breakpoint($desktop-large) {
- top: 160px;
- }
- }
-
- .col-right {
- text-align: right;
- box-sizing: border-box;
-
- @include breakpoint($desktop-small) {
- padding-left: 65px;
- }
-
- @include breakpoint($desktop-large) {
- padding-left: 105px;
- }
- }
-
- li:last-child {
- margin-bottom: $spacing--s;
-
- @include breakpoint($desktop-small) {
- margin-bottom: $spacing--m;
- }
- }
-}
-
-.sponsor {
- padding: 50px $spacing--l 90px;
-
- @include breakpoint($desktop-small) {
- padding: 65px 100px 55px;
- }
-
- @include breakpoint($desktop-large) {
- padding: 65px 245px 175px;
- }
-
- .section-title {
- margin-bottom: 30px;
-
- @include breakpoint($desktop-large) {
- margin-bottom: 50px;
- }
- }
-
- .sponsor-img {
- display: block;
- width: 75%;
- margin: $spacing--s auto 0;
- max-width: 600px;
- display: block;
-
- @include breakpoint($desktop-small) {
- margin: 0;
- position: relative;
- width: 50%;
- left: 190px;
- top: -45px;
- }
-
- @include breakpoint($desktop-large) {
- max-width: 410px;
- width: 59%;
- left: 200px;
- top: -65px;
- }
- }
-}
-
-
-.activities {
- padding: $spacing--xxl $spacing--l $spacing--xxl;
-
- @include breakpoint($desktop-small) {
- padding: 55px 100px 30px;
- }
-
- @include breakpoint($desktop-large) {
- padding: 75px 245px 50px;
- }
-
- .section-title {
- @include breakpoint($tablet) {
- margin-bottom: $spacing--l;
- }
-
- @include breakpoint($desktop-small) {
- margin-bottom: $spacing--xs;
- }
-
- @include breakpoint($desktop-large) {
- margin-bottom: 40px;
- }
- }
-
- .activities-content {
- text-align: center;
- }
-
- .activity-title {
- line-height: 1;
- margin: $spacing--xs 0 $spacing--xs;
-
- @include breakpoint($desktop-small) {
- margin: $spacing--l 0 $spacing--s;
- }
-
- @include breakpoint($desktop-large) {
- margin: 50px 0 $spacing--s;
- }
- }
-
- .activity-col {
- margin-bottom: 40px;
-
- &:last-child {
- margin-bottom: 0;
- }
- }
-
- .activities-img-wrapper {
-
- @include breakpoint($tablet) {
- height: 115px;
- }
-
- @include breakpoint($desktop-small) {
- height: 105px;
- display: flex;
- justify-content: center;
- align-items: center;
- }
-
- @include breakpoint($desktop-large) {
- height: 150px;
- }
- }
-
- .activities-img {
-
- &.libraries {
- height: 90px;
-
- @include breakpoint($desktop-small) {
- height: 95px;
- }
-
- @include breakpoint($desktop-large) {
- height: 150px;
- }
- }
-
- &.communities {
- height: 110px;
-
- @include breakpoint($desktop-small) {
- height: 105px;
- }
-
- @include breakpoint($desktop-large) {
- height: 150px;
- }
- }
-
- &.standards {
- height: 105px;
-
- @include breakpoint($desktop-small) {
- height: 105px;
- }
-
- @include breakpoint($desktop-large) {
- height: 150px;
- }
- }
-
- &.education {
- height: 80px;
-
- @include breakpoint($desktop-small) {
- height: 80px;
- }
-
- @include breakpoint($desktop-large) {
- height: 120px;
- }
- }
- }
-}
-
-.team {
- background: $color--black;
- padding: 75px $spacing--l 50px;
-
- @include breakpoint($desktop-small) {
- padding: 145px 100px 75px;
- }
-
- @include breakpoint($desktop-large) {
- padding: 220px 245px 185px;
- }
-
- .section-title {
- margin-bottom: $spacing--l;
-
- @include breakpoint($desktop-large) {
- margin-bottom: 60px;
- }
- }
-
- /* to solve uneven # of cards in a row */
- .row:last-child {
- .col-third:last-child {
- margin-bottom: 0;
- }
- }
-}
-
-.alumni {
- padding-top: 25px;
-}
-
-.news {
- padding: 55px $spacing--l 60px;
-
- @include breakpoint($desktop-small) {
- padding: 135px 100px 95px;
- }
-
- @include breakpoint($desktop-large) {
- padding: 190px 245px 160px;
- }
-
- .section-title {
- margin-bottom: $spacing--m;
-
- @include breakpoint($desktop-small) {
- margin-bottom: 40px;
- }
-
- @include breakpoint($desktop-large) {
- margin-bottom: 55px;
- }
- }
-
- li {
- padding-left: 0;
- }
-
- .news-list-item {
- margin-bottom: $spacing--l;
-
- @include breakpoint($desktop-small) {
- margin-bottom: 35px;
- }
-
- @include breakpoint($desktop-large) {
- margin-bottom: 40px;
- }
- }
-
- .all {
- margin-top: $spacing--xxl;
- display: block;
-
- @include breakpoint($desktop-small) {
- margin-top: 60px;
- }
-
- @include breakpoint($desktop-large) {
- margin-top: 90px;
- }
- }
-
- li:before {
- content: none;
- }
-}
-
-.links {
- background: $color--gray;
- padding: 65px $spacing--l;
-
- @include breakpoint($desktop-small) {
- padding: 145px 100px 125px;
- }
-
- @include breakpoint($desktop-large) {
- padding: 230px 245px 200px;
- }
-
- .header-shadow {
- color: $color--slate;
- }
-
- .section-title {
- margin-bottom: $spacing--m;
-
- @include breakpoint($desktop-small) {
- margin-bottom: $spacing--xxl;
- }
-
- @include breakpoint($desktop-large) {
- margin-bottom: 65px;
- }
- }
-
- .link-item {
- margin-bottom: $spacing--m;
- padding-left: 0;
-
- &:before {
- content: none;
- }
-
- @include breakpoint($desktop-small) {
- margin-bottom: $spacing--xl;
- }
-
- @include breakpoint($desktop-large) {
- margin-bottom: $spacing--xxl;
- }
- }
-}
-
-.faq {
- padding: 65px $spacing--l $spacing--xxl;
-
- @include breakpoint($desktop-small) {
- padding: 130px 100px 60px;
- }
-
- @include breakpoint($desktop-large) {
- padding: 215px 245px 135px;
- }
-
- .section-title {
- margin-bottom: $spacing--m;
-
- @include breakpoint($desktop-large) {
- margin-bottom: $spacing--xxl;
- }
-
- }
-
- .faq-content {
-
- @include breakpoint($desktop-small) {
- max-width: 770px;
- }
-
- @include breakpoint($desktop-large) {
- max-width: 1345px;
- }
- }
-}
\ No newline at end of file
diff --git a/_sass/pages/post.scss b/_sass/pages/post.scss
deleted file mode 100644
index 5ae290403..000000000
--- a/_sass/pages/post.scss
+++ /dev/null
@@ -1,313 +0,0 @@
-.post {
-
- display: flex;
- flex-direction: column;
- @include breakpoint($desktop-small) {
-
- min-height: calc(100vh - 75px - 130px); /* 100vh - header - footer */
- }
-
- @include breakpoint($desktop-large) {
- min-height: calc(100vh - 100px - 200px); /* 100vh - header - footer */
- }
-
- .title-section {
- margin-bottom: 80px;
- border-bottom: 1px solid #e2e2e2;
- padding-bottom: 8px;
- }
-
- .current-article {
-
- @include breakpoint($desktop-small) {
- // width: 66%;
- }
-
- .news-title {
- font-size: 2.5rem;
- text-align: center;
- @include breakpoint($desktop-small) {
- font-size: 3.5rem;
- }
- }
- }
-
- .post-hero {
- height: 220px;
- width: 100%;
- object-fit: cover;
-
- @include breakpoint($desktop-small) {
- height: 710px;
- }
- }
-
- .article {
- position: relative;
- padding: $spacing--xxl $spacing--l 100px;
-
- @include breakpoint($desktop-small) {
- padding: 60px 100px 70px;
- }
-
- // @include breakpoint($desktop-large) {
- // padding: 80px 245px 95px;
- // }
-
- @media only screen and (min-width: 1500px) {
- padding: 80px 245px 95px;
- }
-
- @media only screen and (min-width: 2000px) {
- max-width: 1510px;
- margin: auto;
- }
-
- article, article p { font-size: 16px !important;}
-
- a {
- color: #0366d6;
- text-decoration: none;
- font-weight: normal;
- line-height: 1.5;
- }
-
- blockquote {
- padding: 0 1em;
- color: #6a737d;
- border-left: .25em solid #dfe2e5;
- margin-top: 11px;
- margin-bottom: 1rem;
- }
-
- em {
- font-weight: normal;
- font-style: italic;
- }
-
- h1, h2 {
- padding-top: 0;
- margin-bottom: 16px;
- padding-bottom: .3em;
- margin-top: 24px;
- &.no-border {
- border-bottom: none;
- }
- }
-
- h1 {
- line-height: 2.5rem;
- font-size: 2em;
- }
-
- h2 {
- font-size: 1.7em;
- }
-
- ol {
- padding-left: 30px;
- list-style-position: outside;
- }
-
- p {
- line-height: 1.5;
- }
-
- p code {
- line-height: 1.5;
- font-size: 85%;
- }
-
- pre code {
- overflow: auto;
- font-size: 13.6px !important;
- line-height: 1.45;
- background-color: #f6f8fa;
- border-radius: 3px;
- font-family: SFMono-Regular,Consolas,Liberation Mono,Menlo,monospace;
- -moz-tab-size: 4;
- tab-size: 4;
- }
-
- strong, strong em {
- font-weight: 600;
- }
-
- ul {
- margin-bottom: 16px;
- li {
- font-size: 1rem;
- }
- }
- }
-
- .caption {
- font-family: $font-family--roboto;
- font-weight: 400;
- font-size: 0.75rem; /* 12px */
- position: absolute;
- color: $color--stone;
- top: $spacing--xxs;
- left: $spacing--xxs;
- line-height: 1.25;
-
- @include breakpoint($desktop-small) {
- left: $spacing--m;
- top: $spacing--xs;
- }
-
- @include breakpoint($desktop-large) {
- font-size: 14px;
- left: $spacing--l;
- top: $spacing--xs;
- }
- }
- .news {
- &.bottom-layout {
- padding-top: 100px;
- padding-bottom: 80px;
- }
- }
-
- .news-title {
- margin-bottom: $spacing--s;
- }
-
- .author {
- // margin-bottom: $spacing--m;
- text-align: center;
- margin-top: -25px;
- }
-
- .author-img {
- width: 40px;
- height: 40px;
- border-radius: 50%;
- vertical-align: middle;
-
- @include breakpoint($desktop-small) {
- height: 45px;
- width: 45px;
- }
- }
-
- .author-name {
- // vertical-align: middle;
- margin-left: 2px;
-
- @include breakpoint($desktop-small) {
- margin-left: $spacing--xxs;
- }
- }
-
- .content-text {
- line-height: 1.4;
- }
-
- .news {
- background: $color--gray;
- // padding: 40px $spacing--l 65px;
-
- @include breakpoint($desktop-small) {
- // padding: 50px 60px;
- // width: 34%;
- }
-
- .news-date {
- @include breakpoint($desktop-small) {
- // display: none;
- }
- }
-
- .news-title {
- @include breakpoint($desktop-small) {
- font-size: 1.25rem;
- }
-
- @include breakpoint($desktop-large) {
- font-size: 1.875rem;
- }
- }
- }
-
- .recent-post-header {
- @include breakpoint($desktop-small) {
- font-size: 40px;
- }
- }
-
- .header-shadow {
- @include breakpoint($desktop-small) {
- display: none;
- }
- }
-
- .news-title,
- .all {
- @include breakpoint($desktop-small) {
- font-size: 1rem;
- }
- }
-
- .all {
- @include breakpoint($desktop-small) {
- margin-top: 55px;
- }
- }
-
- .news-list-item {
- @include breakpoint($desktop-small) {
- margin-bottom: $spacing--l;
- }
- }
-
- .section-title {
- @include breakpoint($desktop-small) {
- margin-bottom: 35px;
- }
- }
-
- table {
- display: block;
- width: 100%;
- overflow: auto;
- vertical-align: middle;
- box-sizing: border-box;
- border-collapse: collapse;
- margin: .8rem 0;
- th {
- font-weight: 600;
- text-align: center;
- }
-
- th,
- td {
- padding: 6px 13px;
- border: 1px solid #dfe2e5;
- vertical-align: middle;
- }
-
- tr {
- border-top: 1px solid #c6cbd1;
-
-
- &:nth-child(2n) {
- background-color: #f6f8fa;
- }
- }
-
- img {
- background-color: transparent;
- }
- }
-}
-
-.center {
- text-align: center;
-}
-.d-block {
- display: block;
-}
-.d-iblock {
- display: inline-block
-}
\ No newline at end of file
diff --git a/_sass/pages/post_backup.scss b/_sass/pages/post_backup.scss
deleted file mode 100644
index d18ee1149..000000000
--- a/_sass/pages/post_backup.scss
+++ /dev/null
@@ -1,277 +0,0 @@
-.post {
-
- @include breakpoint($desktop-small) {
- display: flex;
- min-height: calc(100vh - 75px - 130px); /* 100vh - header - footer */
- }
-
- @include breakpoint($desktop-large) {
- min-height: calc(100vh - 100px - 200px); /* 100vh - header - footer */
- }
-
- .current-article {
- @include breakpoint($desktop-small) {
- width: 66%;
- }
-
- .news-title {
- @include breakpoint($desktop-small) {
- font-size: 2rem;
- }
- }
- }
-
- .post-hero {
- height: 220px;
- width: 100%;
- object-fit: cover;
-
- @include breakpoint($desktop-small) {
- height: 710px;
- }
- }
-
- .article {
- position: relative;
- padding: $spacing--xxl $spacing--l 100px;
-
- @include breakpoint($desktop-small) {
- padding: 60px 100px 70px;
- }
-
- // @include breakpoint($desktop-large) {
- // padding: 80px 245px 95px;
- // }
-
- @media only screen and (min-width: 1500px) {
- padding: 80px 140px 95px;
- }
-
- article, article p { font-size: 16px !important;}
-
- a {
- color: #0366d6;
- text-decoration: none;
- font-weight: normal;
- line-height: 1.5;
- }
-
- blockquote {
- padding: 0 1em;
- color: #6a737d;
- border-left: .25em solid #dfe2e5;
- margin-top: 11px;
- margin-bottom: 1rem;
- }
-
- em {
- font-weight: normal;
- font-style: italic;
- }
-
- h1, h2 {
- border-bottom: 1px solid #eaecef;
- padding-top: 0;
- margin-bottom: 16px;
- padding-bottom: .3em;
- margin-top: 24px;
- }
-
- h1 {
- line-height: 2.5rem;
- font-size: 2em;
- }
-
- h2 {
- font-size: 1.7em;
- }
-
- ol {
- padding-left: 30px;
- list-style-position: outside;
- }
-
- p {
- line-height: 1.5;
- }
-
- p code {
- line-height: 1.5;
- font-size: 85%;
- }
-
- pre code {
- overflow: auto;
- font-size: 13.6px !important;
- line-height: 1.45;
- background-color: #f6f8fa;
- border-radius: 3px;
- font-family: SFMono-Regular,Consolas,Liberation Mono,Menlo,monospace;
- -moz-tab-size: 4;
- tab-size: 4;
- }
-
- strong, strong em {
- font-weight: 600;
- }
-
- ul {
- margin-bottom: 16px;
- li {
- font-size: 1rem;
- }
- }
- }
-
- .caption {
- font-family: $font-family--roboto;
- font-weight: 400;
- font-size: 0.75rem; /* 12px */
- position: absolute;
- color: $color--stone;
- top: $spacing--xxs;
- left: $spacing--xxs;
- line-height: 1.25;
-
- @include breakpoint($desktop-small) {
- left: $spacing--m;
- top: $spacing--xs;
- }
-
- @include breakpoint($desktop-large) {
- font-size: 14px;
- left: $spacing--l;
- top: $spacing--xs;
- }
- }
-
- .news-title {
- margin-bottom: $spacing--s;
- }
-
- .author {
- margin-bottom: $spacing--m;
- }
-
- .author-img {
- width: 40px;
- height: 40px;
- border-radius: 50%;
- vertical-align: middle;
-
- @include breakpoint($desktop-small) {
- height: 45px;
- width: 45px;
- }
- }
-
- .author-name {
- vertical-align: middle;
- margin-left: 2px;
-
- @include breakpoint($desktop-small) {
- margin-left: $spacing--xxs;
- }
- }
-
- .content-text {
- line-height: 1.4;
- }
-
- .news {
- background: $color--gray;
- padding: 40px $spacing--l 65px;
-
- @include breakpoint($desktop-small) {
- padding: 50px 60px;
- width: 34%;
- }
-
- .news-date {
- @include breakpoint($desktop-small) {
- display: none;
- }
- }
-
- .news-title {
- @include breakpoint($desktop-small) {
- font-size: 1.25rem;
- }
-
- @include breakpoint($desktop-large) {
- font-size: 1.875rem;
- }
- }
- }
-
- .recent-post-header {
- @include breakpoint($desktop-small) {
- font-size: 40px;
- }
- }
-
- .header-shadow {
- @include breakpoint($desktop-small) {
- display: none;
- }
- }
-
- .news-title,
- .all {
- @include breakpoint($desktop-small) {
- font-size: 1rem;
- }
- }
-
- .all {
- @include breakpoint($desktop-small) {
- margin-top: 55px;
- }
- }
-
- .news-list-item {
- @include breakpoint($desktop-small) {
- margin-bottom: $spacing--l;
- }
- }
-
- .section-title {
- @include breakpoint($desktop-small) {
- margin-bottom: 35px;
- }
- }
-
- table {
- display: block;
- width: 100%;
- overflow: auto;
- vertical-align: middle;
- box-sizing: border-box;
- border-collapse: collapse;
- margin: .8rem 0;
- th {
- font-weight: 600;
- text-align: center;
- }
-
- th,
- td {
- padding: 6px 13px;
- border: 1px solid #dfe2e5;
- vertical-align: middle;
- }
-
- tr {
- border-top: 1px solid #c6cbd1;
-
-
- &:nth-child(2n) {
- background-color: #f6f8fa;
- }
- }
-
- img {
- background-color: transparent;
- }
- }
-}
\ No newline at end of file
diff --git a/_sass/pages/team.scss b/_sass/pages/team.scss
deleted file mode 100644
index c73ae99a8..000000000
--- a/_sass/pages/team.scss
+++ /dev/null
@@ -1,134 +0,0 @@
-.team-member {
- .team {
- padding: 50px $spacing--l 75px;
- background: $color--white;
-
- @include breakpoint($desktop-small) {
- padding: 85px 100px 150px;
- }
-
- @include breakpoint($desktop-large) {
- padding: 160px 245px 190px;
- }
- }
-
- .flex-row {
- @include breakpoint($desktop-small) {
- margin-top: 55px;
- display: flex;
- justify-content: space-between;
- }
- }
-
- .team-member-img-wrapper {
- @include breakpoint($desktop-small) {
- width: 35%;
- padding-bottom: 33%;
- height: 0;
- order: 2;
- position: relative;
- top: -$spacing--xxl;
- }
- }
-
- .team-member-img {
- height: 325px;
- margin-bottom: 40px;
- object-fit: cover;
-
-
- @include breakpoint($desktop-small) {
- position: absolute;
- height: 100%;
- width: 100%;
- }
- }
-
- .title {
- margin: 3px 0 35px;
- display: block;
-
- @include breakpoint($desktop-small) {
- margin: $spacing--xxs 0 30px;
- }
- }
-
- .bio {
- @include breakpoint($desktop-small) {
- order: 1;
- width: 50%;
- }
- }
-
- .social-icons {
- display: flex;
- align-items: center;
- margin-bottom: 35px;
- }
-
- .social-icon {
- width: 25px;
- margin-right: 30px;
-
- @include breakpoint($desktop-small) {
- width: 27px;
- }
-
- &.web {
- width: 23px;
-
- @include breakpoint($desktop-small) {
- width: 25px;
- }
- }
-
- &.resume {
- width: 21px;
-
- @include breakpoint($desktop-small) {
- width: 24px;
- }
- }
-
- &.stack,
- &.linkedin {
- width: 24px;
- }
-
- &.github {
- @include breakpoint($desktop-small) {
- width: 25px;
- }
- }
-
-
- }
-
- .all {
- margin-top: 40px;
- display: block;
- font-weight: 700;
- text-decoration: underline;
-
- @include breakpoint($desktop-small) {
- margin-top: 65px;
- }
- }
-
- .news {
- background: $color--gray;
- padding: 65px $spacing--l 40px;
-
- @include breakpoint($desktop-small) {
- padding: 150px 100px 95px;
- }
-
- @include breakpoint($desktop-large) {
- padding: 170px 245px 115px;
- }
-
- .header-shadow {
- color: $color--slate;
- }
- }
-}
diff --git a/alan/2023/10/27/AlanQ3Update.html b/alan/2023/10/27/AlanQ3Update.html
new file mode 100644
index 000000000..f5d750ae8
--- /dev/null
+++ b/alan/2023/10/27/AlanQ3Update.html
@@ -0,0 +1,707 @@
+
+
+
+
+
+Alan's Q3 Update | The C++ Alliance
+
+
+
+
+
+
+
+
+
+
+
+
+Alan’s Q3 Update | The C++ Alliance
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+