# dist plan --output-format=json > plan-dist-manifest.json
# Config for 'dist'
[workspace]
members = ["cargo:.", "cargo:src/get_file_hash_core"]
# Config for 'dist'
[dist]
# The preferred dist version to use in CI (Cargo.toml SemVer syntax)
cargo-dist-version = "0.30.3"
# CI backends to support
ci = "github"
# The installers to generate for each app
installers = ["shell", "powershell", "homebrew", "msi"]
# A GitHub repo to push Homebrew formulas to
tap = "gnostr-org/homebrew-gnostr-org"
# Path that installers should place binaries in
install-path = "CARGO_HOME"
# Publish jobs to run in CI
publish-jobs = ["homebrew"]
# Whether to install an updater program
install-updater = true
# Target platforms to build apps for (Rust target-triple syntax)
targets = ["aarch64-apple-darwin", "aarch64-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-unknown-linux-gnu", "x86_64-unknown-linux-musl", "x86_64-pc-windows-msvc"]
# Skip checking whether the specified configuration files are up to date
allow-dirty = ["ci"]
npub1g8jq...3zcu
npub1g8jq...3zcu
/// deterministic nostr event build example
// deterministic nostr event build example
use get_file_hash_core::get_file_hash;
#[cfg(all(not(debug_assertions), feature = "nostr"))]
use get_file_hash_core::{get_git_tracked_files, DEFAULT_GNOSTR_KEY, DEFAULT_PICTURE_URL, DEFAULT_BANNER_URL};
#[cfg(all(not(debug_assertions), feature = "nostr"))]
use nostr_sdk::{EventBuilder, Keys, EventId, Tag, SecretKey, JsonUtil, Kind, Event};
#[cfg(all(not(debug_assertions), feature = "nostr"))]
use serde_json::to_string;
#[cfg(all(not(debug_assertions), feature = "nostr"))]
use std::fs;
use std::path::PathBuf;
use sha2::{Digest, Sha256};
#[cfg(all(not(debug_assertions), feature = "nostr"))]
use ::hex;
#[cfg(all(not(debug_assertions), feature = "nostr"))]
use std::io::Write;
#[cfg(all(not(debug_assertions), feature = "nostr"))]
/// Decides whether a relay should be dropped from the relay list based on
/// the error message it returned during event publication. Matches known
/// "permanent" failure modes: unreachable, blocking/authorization, spam
/// filtering, publish errors, and duplicate broadcasts.
fn should_remove_relay(error_msg: &str) -> bool {
    const REMOVAL_MARKERS: [&str; 7] = [
        "relay not connected",
        "not in web of trust",
        "blocked: not authorized",
        "timeout",
        "blocked: spam not permitted",
        "relay experienced an error trying to publish the latest event",
        "duplicate: event already broadcast",
    ];
    REMOVAL_MARKERS.iter().any(|marker| error_msg.contains(marker))
}
#[cfg(all(not(debug_assertions), feature = "nostr"))]
/// Writes the JSON serialization of `event` to `output_dir/filename`,
/// creating any missing parent directories first.
///
/// `filename` may itself contain path separators (callers pass paths such
/// as `"<kind>/<hash>/<pubkey>/<id>.json"`), so intermediate directories
/// are created as needed.
///
/// Returns `Some(())` on success and `None` on any I/O failure. Failures
/// are reported via `cargo:warning=` so they surface in the build output
/// instead of aborting the build.
fn write_event_json_to_file(
    output_dir: &PathBuf,
    filename: &str,
    event: &Event,
) -> Option<()> {
    let file_path = output_dir.join(filename);
    if let Some(parent) = file_path.parent() {
        if let Err(e) = fs::create_dir_all(parent) {
            println!("cargo:warning=Failed to create parent directories for {}: {}", file_path.display(), e);
            return None;
        }
    }
    if let Err(e) = fs::File::create(&file_path).and_then(|mut file| write!(file, "{}", event.as_json())) {
        // Bug fix: use the cargo:warning prefix so write failures are visible
        // in cargo's build output (a bare println! from a build script is
        // swallowed by Cargo), matching the create_dir_all branch above.
        println!("cargo:warning=Failed to write event JSON to file {}: {}", file_path.display(), e);
        None
    } else {
        println!("Successfully wrote event JSON to {}", file_path.display());
        Some(())
    }
}
#[cfg(all(not(debug_assertions), feature = "nostr"))]
/// Signs `event_builder` with the client's signer, broadcasts the event to
/// all configured relays, logs per-relay results, prunes relays whose
/// failures look permanent (see `should_remove_relay`), accumulates the
/// serialized event size into `total_bytes_sent` once per successful
/// relay, and records the event JSON locally under
/// `output_dir/<file_path_str>/<hash>/<pubkey>/<event-id>.json`.
///
/// Returns the published `EventId` on success, or `None` if the broadcast
/// failed entirely.
///
/// NOTE(review): `unwrap()` on signing means a signer failure aborts the
/// build — presumably acceptable for a build script, but worth confirming.
async fn publish_nostr_event_if_release(
    client: &mut nostr_sdk::Client,
    hash: String,
    keys: Keys,
    event_builder: EventBuilder,
    _relay_urls: &mut Vec<String>,
    file_path_str: &str,
    output_dir: &PathBuf,
    total_bytes_sent: &mut usize,
) -> Option<EventId> {
    let public_key = keys.public_key().to_string();
    let event = client.sign_event_builder(event_builder).await.unwrap();
    match client.send_event(&event).await {
        Ok(event_output) => {
            println!("cargo:warning=Published Nostr event for {}: {}", file_path_str, event_output.val);
            // Serialized size of the event, counted once per accepting relay.
            let event_json_size = to_string(&event).map(|s| s.as_bytes().len()).unwrap_or(0);
            // Print successful relays and tally outgoing traffic.
            for relay_url in event_output.success.iter() {
                println!("cargo:warning=Successfully published to relay: {} ({} bytes)", relay_url, event_json_size);
                *total_bytes_sent += event_json_size;
            }
            // Print failed relays and remove "unfriendly" relays so later
            // publishes in this build skip them.
            for (relay_url, error_msg) in event_output.failed.iter() {
                if should_remove_relay(error_msg) {
                    if let Err(e) = client.remove_relay(relay_url).await {
                        println!("cargo:warning=Failed to remove relay {}: {}", relay_url, e);
                    }
                }
            }
            // Local record path: <file>/<hash>/<pubkey>/<event-id>.json.
            // (Removed a redundant clone of `public_key` and an unnecessary
            // `.to_string()` — `format!` renders Display directly.)
            let filename = format!("{}/{}/{}/{}.json", file_path_str, hash, public_key, event_output.val);
            write_event_json_to_file(output_dir, &filename, &event);
            Some(event_output.val)
        },
        Err(e) => {
            println!("cargo:warning=Failed to publish Nostr event for {}: {}", file_path_str, e);
            None
        },
    }
}
#[cfg(all(not(debug_assertions), feature = "nostr"))]
/// Publishes a NIP-34 repository announcement (kind 30617) describing this
/// repository, signs it with the client's signer, and records the event
/// JSON under `output_dir/30617/<repo>/<pubkey>/<event-id>.json`.
///
/// Returns the published `EventId`, or `None` if broadcasting failed.
pub async fn get_repo_announcement_event(
    client: &mut nostr_sdk::Client,
    _keys: &Keys,
    relay_urls: &Vec<String>,
    repo_url: &str,
    repo_name: &str,
    repo_description: &str,
    git_commit_hash: &str,
    git_branch: &str,
    output_dir: &PathBuf,
    public_key_hex: &str,
) -> Option<EventId> {
    // Helper: build a Tag from string parts. Inputs are fixed tag names plus
    // caller-supplied metadata, so a parse failure is treated as a bug.
    let tag = |parts: &[&str]| {
        Tag::parse(parts.iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap()
    };
    // NIP-34 announcement tag set.
    let mut tags = vec![
        tag(&["d", repo_name]),
        tag(&["name", repo_name]),
        tag(&["description", repo_description]),
        tag(&["web", repo_url]),
        tag(&["clone", repo_url]),
        tag(&["r", git_commit_hash, "euc"]),
        tag(&["commit", git_commit_hash]),
        tag(&["branch", git_branch]),
        tag(&["maintainers", "gnostr"]),
        tag(&["t", "gnostr"]),
        tag(&["t", repo_name]),
    ];
    // One "relays" tag per known relay URL.
    for relay in relay_urls {
        tags.push(tag(&["relays", relay]));
    }
    let builder = EventBuilder::new(Kind::Custom(30617), repo_description).tags(tags);
    let event = client.sign_event_builder(builder).await.unwrap();
    match client.send_event(&event).await {
        Ok(event_output) => {
            println!("cargo:warning=Published Nostr Repository Announcement for {}: {}", repo_name, event_output.val);
            let filename = format!("30617/{}/{}/{}.json", repo_name, public_key_hex, event_output.val);
            write_event_json_to_file(output_dir, &filename, &event);
            Some(event_output.val)
        },
        Err(e) => {
            println!("cargo:warning=Failed to publish Nostr Repository Announcement for {}: {}", repo_name, e);
            None
        },
    }
}
#[cfg(all(not(debug_assertions), feature = "nostr"))]
/// Publishes a NIP-34 patch event (kind 1617) describing the repository
/// state, signs it with the client's signer, and records the event JSON
/// under `output_dir/1617/<repo>/<pubkey>/<event-id>.json`.
///
/// Returns the published `EventId`, or `None` if broadcasting failed.
///
/// Bug fix: the original logged "Repository Announcement" and filed the
/// record under `30617/` — both copy-pasted from the announcement function
/// — even though this publishes kind 1617 patch events.
pub async fn get_repo_patch_event(
    client: &mut nostr_sdk::Client,
    _keys: &Keys,
    _relay_urls: &Vec<String>,
    repo_url: &str,
    repo_name: &str,
    repo_description: &str,
    git_commit_hash: &str,
    git_branch: &str,
    output_dir: &PathBuf,
    public_key_hex: &str,
) -> Option<EventId> {
    let tags = vec![
        Tag::parse(["r", repo_url].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["name", repo_name].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["description", repo_description].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["commit", git_commit_hash].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["branch", git_branch].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
    ];
    let event_builder = EventBuilder::new(Kind::Custom(1617), repo_description).tags(tags);
    let event = client.sign_event_builder(event_builder).await.unwrap();
    match client.send_event(&event).await {
        Ok(event_output) => {
            println!("cargo:warning=Published Nostr Repository Patch for {}: {}", repo_name, event_output.val);
            // File the record under the event's actual kind (1617).
            let filename = format!("1617/{}/{}/{}.json", repo_name, public_key_hex, event_output.val);
            write_event_json_to_file(output_dir, &filename, &event);
            Some(event_output.val)
        },
        Err(e) => {
            println!("cargo:warning=Failed to publish Nostr Repository Patch for {}: {}", repo_name, e);
            None
        },
    }
}
#[tokio::main]
// Build-script entry point. Always: injects package/git metadata and source
// hashes as rustc env vars and registers rerun triggers. Release builds with
// the `nostr` feature additionally publish per-file Nostr events, a build
// manifest, and a NIP-34 repository announcement to relays.
async fn main() {
// Root of this package (directory containing Cargo.toml); used for the
// .git probe and as the base for git-tracked-file discovery below.
let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
let is_git_repo = std::path::Path::new(&manifest_dir).join(".git").exists();
// Re-export package metadata so the crate can read it with env!().
println!("cargo:rustc-env=CARGO_PKG_NAME={}", env!("CARGO_PKG_NAME"));
println!("cargo:rustc-env=CARGO_PKG_VERSION={}", env!("CARGO_PKG_VERSION"))
;
if is_git_repo {
// Full commit hash of HEAD; degrades to an empty string if git fails.
let git_commit_hash_output = std::process::Command::new("git")
.args(&["rev-parse", "HEAD"])
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())
.output()
.expect("Failed to execute git command for commit hash");
let git_commit_hash_str = if git_commit_hash_output.status.success() && !git_commit_hash_output.stdout.is_empty() {
String::from_utf8(git_commit_hash_output.stdout).unwrap().trim().to_string()
} else {
println!("cargo:warning=Git commit hash command failed or returned empty. Status: {:?}, Stderr: {}",
git_commit_hash_output.status, String::from_utf8_lossy(&git_commit_hash_output.stderr));
String::new()
};
println!("cargo:rustc-env=GIT_COMMIT_HASH={}", git_commit_hash_str);
// Current branch name via --abbrev-ref; empty string on failure.
let git_branch_output = std::process::Command::new("git")
.args(&["rev-parse", "--abbrev-ref", "HEAD"])
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())
.output()
.expect("Failed to execute git command for branch name");
let git_branch_str = if git_branch_output.status.success() && !git_branch_output.stdout.is_empty() {
String::from_utf8(git_branch_output.stdout).unwrap().trim().to_string()
} else {
println!("cargo:warning=Git branch command failed or returned empty. Status: {:?}, Stderr: {}",
git_branch_output.status, String::from_utf8_lossy(&git_branch_output.stderr));
String::new()
};
println!("cargo:rustc-env=GIT_BRANCH={}", git_branch_str);
} else {
// Not a git checkout (e.g. a published crate tarball): export empty
// values so env!() lookups in the crate still succeed.
println!("cargo:rustc-env=GIT_COMMIT_HASH=");
println!("cargo:rustc-env=GIT_BRANCH=");
}
println!("cargo:rerun-if-changed=.git/HEAD");
//#[cfg(all(not(debug_assertions), feature = "nostr"))]
//let relay_urls = get_file_hash_core::get_relay_urls();
// Content hashes of key sources, exported for provenance.
let cargo_toml_hash = get_file_hash!("Cargo.toml");
println!("cargo:rustc-env=CARGO_TOML_HASH={}", cargo_toml_hash);
let lib_hash = get_file_hash!("src/lib.rs");
println!("cargo:rustc-env=LIB_HASH={}", lib_hash);
let build_hash = get_file_hash!("build.rs");
println!("cargo:rustc-env=BUILD_HASH={}", build_hash);
// Rerun this script whenever any hashed input changes.
println!("cargo:rerun-if-changed=Cargo.toml");
println!("cargo:rerun-if-changed=src/lib.rs");
println!("cargo:rerun-if-changed=build.rs");
let online_relays_csv_path = PathBuf::from(&manifest_dir).join("src/get_file_hash_core/src/online_relays_gps.csv");
if online_relays_csv_path.exists() {
println!("cargo:rerun-if-changed={}", online_relays_csv_path.to_str().unwrap());
}
// Everything below is compiled only for release builds with the `nostr`
// feature: it performs network I/O against Nostr relays.
#[cfg(all(not(debug_assertions), feature = "nostr"))]
// NOTE(review): this runtime cfg! check is redundant — the #[cfg] attribute
// above already excludes the block from debug builds.
if cfg!(not(debug_assertions)) {
println!("cargo:warning=Nostr feature enabled: Build may take longer due to network operations (publishing events to relays).");
// This code only runs in release builds
let package_version = std::env::var("CARGO_PKG_VERSION").unwrap();
// Local record of published events, relative to the working directory.
let output_dir = PathBuf::from(format!(".gnostr/build/{}", package_version));
if let Err(e) = fs::create_dir_all(&output_dir) {
println!("cargo:warning=Failed to create output directory {}: {}", output_dir.display(), e);
}
let files_to_publish: Vec<String> = get_git_tracked_files(&PathBuf::from(&manifest_dir));
// Initialize client and keys once
// Deterministic throwaway identity: key = SHA-256("initial_seed").
let initial_keys = Keys::new(SecretKey::from_hex(&hex::encode(Sha256::digest("initial_seed".as_bytes()))).expect("Failed to create initial Nostr keys"));
let mut client = nostr_sdk::Client::new(initial_keys.clone());
let mut relay_urls = get_file_hash_core::get_relay_urls();
// Add relays to the client
for relay_url in relay_urls.iter() {
if let Err(e) = client.add_relay(relay_url).await {
println!("cargo:warning=Failed to add relay {}: {}", relay_url, e);
}
}
client.connect().await;
println!("cargo:warning=Added and connected to {} relays.", relay_urls.len());
// Event tags referencing each successfully published per-file event;
// later attached to the build manifest so it links back to them.
let mut published_event_ids: Vec<Tag> = Vec::new();
let mut total_bytes_sent: usize = 0;
for file_path_str in &files_to_publish {
println!("cargo:warning=Processing file: {}", file_path_str);
match fs::read(file_path_str) {
Ok(bytes) => {
// Per-file deterministic identity: the signing key is derived from
// the file's SHA-256 hash, so the same content always signs with
// the same key.
let mut hasher = Sha256::new();
hasher.update(&bytes);
let result = hasher.finalize();
let file_hash_hex = hex::encode(result);
match SecretKey::from_hex(&file_hash_hex.clone()) {
Ok(secret_key) => {
let keys = Keys::new(secret_key);
let content = String::from_utf8_lossy(&bytes).into_owned();
// Tag the note with the file path and the package version.
let tags = vec![
Tag::parse(["file", file_path_str].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
Tag::parse(["version", &package_version].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
];
let event_builder = EventBuilder::text_note(content).tags(tags);
if let Some(event_id) = publish_nostr_event_if_release(&mut client, file_hash_hex, keys.clone(), event_builder, &mut relay_urls, file_path_str, &output_dir, &mut total_bytes_sent).await {
published_event_ids.push(Tag::event(event_id));
}
// Publish metadata event
get_file_hash_core::publish_metadata_event(
&keys,
&relay_urls,
DEFAULT_PICTURE_URL,
DEFAULT_BANNER_URL,
file_path_str,
).await;
}
Err(e) => {
println!("cargo:warning=Failed to derive Nostr secret key for {}: {}", file_path_str, e);
}
}
}
Err(e) => {
println!("cargo:warning=Failed to read file {}: {}", file_path_str, e);
}
}
}
// Create and publish the build_manifest
if !published_event_ids.is_empty() {
//TODO this will be either the default or detected from env vars PRIVATE_KEY
let keys = Keys::new(SecretKey::from_hex(DEFAULT_GNOSTR_KEY).expect("Failed to create Nostr keys from DEFAULT_GNOSTR_KEY"));
let cloned_keys = keys.clone();
let content = format!("Build manifest for get_file_hash v{}", package_version);
// NOTE(review): four identical build_manifest tags — looks accidental;
// confirm whether one tag would suffice.
let mut tags = vec![
Tag::parse(["build_manifest", &package_version].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
Tag::parse(["build_manifest", &package_version].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
Tag::parse(["build_manifest", &package_version].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
Tag::parse(["build_manifest", &package_version].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
];
// Link every per-file event into the manifest via event tags.
tags.extend(published_event_ids);
let event_builder = EventBuilder::text_note(content.clone()).tags(tags);
if let Some(event_id) = publish_nostr_event_if_release(
&mut client,
hex::encode(Sha256::digest(content.as_bytes())),
keys,
event_builder,
&mut relay_urls,
"build_manifest.json",
&output_dir,
&mut total_bytes_sent,
).await {
let build_manifest_event_id = Some(event_id);
// Publish metadata event for the build manifest
get_file_hash_core::publish_metadata_event(
&cloned_keys, // Use reference to cloned keys here
&relay_urls,
DEFAULT_PICTURE_URL,
DEFAULT_BANNER_URL,
&format!("build_manifest:{}", package_version),
).await;
let git_commit_hash = std::env::var("GIT_COMMIT_HASH").unwrap_or_default();
let git_branch = std::env::var("GIT_BRANCH").unwrap_or_default();
let repo_url = std::env::var("CARGO_PKG_REPOSITORY").unwrap();
let repo_name = std::env::var("CARGO_PKG_NAME").unwrap();
let repo_description = std::env::var("CARGO_PKG_DESCRIPTION").unwrap();
// Shadows the outer output_dir with the same path (already created above).
let output_dir = PathBuf::from(format!(".gnostr/build/{}", package_version));
if let Err(e) = fs::create_dir_all(&output_dir) {
println!("cargo:warning=Failed to create output directory {}: {}", output_dir.display(), e);
}
// The announcement identity is derived from the manifest event id.
// NOTE(review): this assumes an event-id hex string is always a valid
// secret-key scalar — confirm this cannot fail for edge-case ids.
let announcement_keys = Keys::new(SecretKey::from_hex(build_manifest_event_id.unwrap().to_hex().as_str()).expect("Failed to create Nostr keys from build_manifest_event_id"));
let announcement_pubkey_hex = announcement_keys.public_key().to_string();
// Publish NIP-34 Repository Announcement
if let Some(_event_id) = get_repo_announcement_event(
&mut client,
&announcement_keys,
&relay_urls,
&repo_url,
&repo_name,
&repo_description,
&git_commit_hash,
&git_branch,
&output_dir,
&announcement_pubkey_hex
).await {
// Successfully published announcement
}
}
}
println!("cargo:warning=Total bytes sent to Nostr relays: {} bytes ({} MB)", total_bytes_sent, total_bytes_sent as f64 / 1024.0 / 1024.0);
}
}
// deterministic nostr event build example
# `build.rs` Documentation
This document explains the functionality of the `build.rs` script in this project. The `build.rs` script is a special Rust file that, if present, Cargo will compile and run *before* compiling the rest of your package. It's typically used for tasks that need to be performed during the build process, such as generating code, setting environment variables, or performing conditional compilation.
## Core Functionality
The `build.rs` script in this project performs the following key functions:
1. **Environment Variable Injection:** It computes various project-related values at compile time and injects them as environment variables (via `cargo:rustc-env=VAR=VALUE` directives) that can be accessed by the main crate using `env!("VAR_NAME")`. This includes:
* `CARGO_PKG_NAME`: The name of the current package (from `Cargo.toml`).
* `CARGO_PKG_VERSION`: The version of the current package (from `Cargo.toml`).
* `GIT_COMMIT_HASH`: The full commit hash of the current Git HEAD (if in a Git repository).
* `GIT_BRANCH`: The name of the current Git branch (if in a Git repository).
* `CARGO_TOML_HASH`: The SHA-256 hash of the `Cargo.toml` file.
* `LIB_HASH`: The SHA-256 hash of the `src/lib.rs` file.
* `BUILD_HASH`: The SHA-256 hash of the `build.rs` file itself.
2. **Rerun Conditions:** It tells Cargo when to re-run the build script. This ensures that the injected environment variables and any conditional compilation logic are up-to-date if relevant files change:
* `Cargo.toml`
* `src/lib.rs`
* `build.rs`
* `.git/HEAD` (to detect changes in the Git repository like new commits or branch switches).
* `src/get_file_hash_core/src/online_relays_gps.csv` (conditionally, if the file exists).
3. **Conditional Nostr Event Publishing (Release Builds with `nostr` feature):**
If the project is being compiled in **release mode (`--release`)** and the **`nostr` feature is enabled (`--features nostr`)**, the `build.rs` script will connect to Nostr relays and publish events. This is intended for "deterministic Nostr event build examples" as indicated by the comments in the file.
* **Relay Management:** It retrieves a list of default relay URLs. During event publishing, it identifies and removes "unfriendly" or unresponsive relays (e.g., those with timeout, connection issues, or spam blocks) from the list for subsequent publications.
* **File Hashing and Key Generation:** For each Git-tracked file (when in a Git repository), it computes its SHA-256 hash. This hash is then used to derive a Nostr `SecretKey`.
* **Event Creation:**
* **Individual File Events:** For each Git-tracked file, a Nostr `text_note` event is created. This event includes tags for:
* `#file`: The path of the file.
* `#version`: The package version.
* (Note: the Git commit hash and branch are injected as the `GIT_COMMIT_HASH` and `GIT_BRANCH` environment variables rather than tagged on individual file events.)
* **Metadata Event:** It publishes a metadata event using `get_file_hash_core::publish_metadata_event`.
* **Linking Event (Build Manifest):** After processing all individual files, if any events were published, a final "build manifest" `text_note` event is created. This event links to all the individual file events that were published during the build using event tags.
* **Output Storage:** The JSON representation of successfully published Nostr events is saved under `.gnostr/build/{package_version}/{file_path_str}/{hash}/{public_key}/{event_id}.json`, relative to the build's working directory. This provides a local record of what was published.
### `publish_nostr_event_if_release` Function
This asynchronous helper function is responsible for:
* Adding relays to the Nostr client.
* Connecting to relays.
* Signing the provided `EventBuilder` to create an `Event`.
* Sending the event to the configured relays.
* Logging success or failure for each relay.
* Identifying and removing unresponsive relays from the `relay_urls` list.
* Saving the published event's JSON to the local filesystem.
### `should_remove_relay` Function
This helper function determines if a relay should be considered "unfriendly" or unresponsive based on common error messages received during Nostr event publication.
## Usage
To prevent 'Too many open files' errors, especially during builds and tests involving numerous file operations or subprocesses (like `git ls-files` or parallel test execution), it may be necessary to increase the file descriptor limit.
* **For local development**: Run `ulimit -n 4096` in your terminal session before executing `cargo build` or `cargo test`. This setting is session-specific.
* **For CI environments**: The `.github/workflows/rust.yml` workflow is configured to set `ulimit -n 4096` for relevant test steps to ensure consistent execution.
The values set by `build.rs` can be accessed in your Rust code (e.g., `src/lib.rs`) at compile time using the `env!` macro. For example:
```rust
pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
```
The Nostr event publishing functionality of `build.rs` is primarily for release builds with the `nostr` feature enabled, allowing for the automatic, deterministic publication of project state to the Nostr network as part of the CI/CD pipeline.
## Example Commands
To interact with the `build.rs` script's features, especially those related to Nostr event publishing, you can use the following `cargo` commands:
* **Build in release mode with Nostr feature (verbose output):**
```bash
cargo build --release --workspace --features nostr -vv
```
* **Run tests for `get_file_hash_core` sequentially with Nostr feature and verbose logging (as in CI):**
```bash
RUST_LOG=info,nostr_sdk=debug,frost=debug cargo test -p get_file_hash_core --features nostr -- --test-threads 1 --nocapture
```
* **Run all workspace tests in release mode with Nostr feature:**
```bash
cargo test --workspace --release --features nostr
```
* **Build `get_file_hash_core` in release mode with Nostr feature (very verbose output):**
```bash
cargo build --release --features nostr -vv -p get_file_hash_core
```
* **Run `get_file_hash_core` tests in release mode with Nostr feature (very verbose output):**
```bash
cargo test --release --features nostr -vv -p get_file_hash_core
```
plan-dist-manifest.json
name: Rust
on:
push:
branches: [ "*" ]
pull_request:
branches: [ "*" ]
env:
CARGO_TERM_COLOR: always
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
RUST_LOG: info
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
features_args: ["", "--no-default-features", "--features nostr"]
steps:
- uses: actions/checkout@v4
- name: Build ${{ matrix.features_args }}
run: cargo build --workspace --verbose ${{ matrix.features_args }}
- name: Run workspace tests ${{ matrix.features_args }}
run: |
cargo test --workspace ${{ matrix.features_args }} -- --test-threads 1
- name: Run get_file_hash_core tests ${{ matrix.features_args }}
shell: bash
run: |
if [[ "${{ matrix.features_args }}" == "--features nostr" ]]; then
cargo test -p get_file_hash_core ${{ matrix.features_args }} -- --test-threads 1 --nocapture
else
cargo test -p get_file_hash_core ${{ matrix.features_args }} -- --test-threads 1
fi
- name: Run get_file_hash tests ${{ matrix.features_args }}
shell: bash
run: |
if [[ "${{ matrix.features_args }}" == "--features nostr" ]]; then
cargo test -p get_file_hash ${{ matrix.features_args }} -- --test-threads 1 --nocapture
else
cargo test -p get_file_hash ${{ matrix.features_args }} -- --test-threads 1
fi
- name: Build Release ${{ matrix.features_args }}
run: cargo build --workspace --release ${{ matrix.features_args }}
#![cfg(feature = "nostr")]
use frost_secp256k1_tr as frost;
use frost::keys::PublicKeyPackage;
use frost::round2::SignatureShare;
use frost::SigningPackage;
use hex;
use rand::thread_rng;
use std::collections::BTreeMap;
use sha2::Sha256;
use serde_json;
use sha2::Digest;
/// Consumes one hex-encoded FROST signature share received from a relay.
///
/// The payload is hex-decoded and deserialized into a `SignatureShare`,
/// and the signer id is validated as a FROST `Identifier`, so malformed
/// input surfaces as an error. Actual per-share verification and storage
/// for aggregation are intentionally omitted in this simplified example —
/// receipt is merely acknowledged.
pub fn process_relay_share(
    relay_payload_hex: &str,
    signer_id_u16: u16,
    _signing_package: &SigningPackage,
    _pubkey_package: &PublicKeyPackage,
) -> Result<(), Box<dyn std::error::Error>> {
    let payload = hex::decode(relay_payload_hex)?;
    // Round-trip the bytes through the FROST types purely to validate them.
    let _ = SignatureShare::deserialize(&payload)?;
    let _ = frost::Identifier::try_from(signer_id_u16)?;
    println!("✅ Share from Signer {} processed (simplified).", signer_id_u16);
    Ok(())
}
/// Simulates the coordinator side of a 2-of-2 FROST "mailbox" session:
/// dealer key generation, round-1 commitments from both signers, round-2
/// signature shares, and finally feeding both shares through
/// `process_relay_share` as if they had arrived via Nostr relays.
/// No actual network I/O or final aggregation is performed.
pub fn simulate_frost_mailbox_coordinator() -> Result<(), Box<dyn std::error::Error>> {
let mut rng = thread_rng();
// 2-of-2 threshold for simplicity.
let (max_signers, min_signers) = (2, 2);
// Trusted-dealer key generation (a real deployment would use DKG).
let (shares, pubkey_package) = frost::keys::generate_with_dealer(
max_signers,
min_signers,
frost::keys::IdentifierList::Default,
&mut rng,
)?;
let signer1_id = frost::Identifier::try_from(1 as u16)?;
let key_package1: frost::keys::KeyPackage = shares[&signer1_id].clone().try_into()?;
let signer2_id = frost::Identifier::try_from(2 as u16)?;
let key_package2: frost::keys::KeyPackage = shares[&signer2_id].clone().try_into()?;
let message = b"BIP-64MOD: Anchor Data Proposal v1";
// Round 1: each signer commits with fresh nonces.
let (nonces1, comms1) = frost::round1::commit(key_package1.signing_share(), &mut rng);
let (nonces2, comms2) = frost::round1::commit(key_package2.signing_share(), &mut rng);
let mut session_commitments = BTreeMap::new();
session_commitments.insert(signer1_id, comms1);
session_commitments.insert(signer2_id, comms2);
let signing_package = frost::SigningPackage::new(session_commitments.clone(), message);
// Round 2: each signer produces a partial signature; hex-encode it as it
// would appear in a relay payload.
let share1 = frost::round2::sign(&signing_package, &nonces1, &key_package1)?;
let share1_hex = hex::encode(share1.serialize());
let share2 = frost::round2::sign(&signing_package, &nonces2, &key_package2)?;
let share2_hex = hex::encode(share2.serialize());
println!("Coordinator listening for Nostr events (simulated)...");
// "Receive" both shares as if they arrived from the mailbox relay.
process_relay_share(&share1_hex, 1_u16, &signing_package, &pubkey_package)?;
process_relay_share(&share2_hex, 2_u16, &signing_package, &pubkey_package)?;
println!("All required shares processed. Coordinator would now aggregate.");
Ok(())
}
/// Simulates a signer's contribution to a ROAST "mailbox" workflow:
/// produces a FROST round-2 signature share and wraps it in a simplified
/// Nostr event JSON addressed to the coordinator.
///
/// In a production ROAST setup the share would be encrypted for the
/// coordinator (e.g. with NIP-44) before being posted to a relay; here the
/// share is placed in `content` in the clear for demonstration.
///
/// # Arguments
///
/// * `_identifier` - FROST identifier of this signer (unused in this body).
/// * `signing_package` - Signing package distributed by the coordinator.
/// * `nonces` - This signer's round-1 nonces.
/// * `key_package` - This signer's FROST key material.
/// * `coordinator_pubkey` - Hex public key of the ROAST coordinator; used
///   to address the event via its `p` tag.
///
/// # Returns
///
/// The Nostr event as a JSON string, or an error from signing or
/// serialization.
pub fn create_signer_event(
    _identifier: frost::Identifier,
    signing_package: &frost::SigningPackage,
    nonces: &frost::round1::SigningNonces,
    key_package: &frost::keys::KeyPackage,
    coordinator_pubkey: &str, // The Hex pubkey of the ROAST coordinator
) -> Result<String, Box<dyn std::error::Error>> {
    // FROST round 2: the signer's partial signature over the package.
    let partial = frost::round2::sign(signing_package, nonces, key_package)?;
    let share_hex = hex::encode(partial.serialize());
    // Session id = SHA-256 of the serialized signing package; it lets the
    // coordinator correlate shares belonging to the same signing session.
    let session_id = {
        let mut digest = Sha256::new();
        digest.update(signing_package.serialize()?);
        hex::encode(digest.finalize())
    };
    // Simplified Nostr event a signer would post to the mailbox relay.
    let event = serde_json::json!({
        "kind": 4, // Kind 4 (private message) as an example; custom kinds are possible.
        "pubkey": hex::encode(key_package.verifying_key().serialize()?.as_slice()), // signer's public key
        "created_at": 1712050000, // fixed example timestamp
        "tags": [
            ["p", coordinator_pubkey], // routes the event to the coordinator
            ["i", session_id], // session identifier for filtering
            ["t", "frost-signature-share"] // searchable event-type label
        ],
        "content": share_hex, // the share itself (encrypted via NIP-44 in production)
        "id": "...", // filled in when the event is actually published
        "sig": "..." // filled in when the event is actually published
    });
    Ok(event.to_string())
}
/// End-to-end simulation of the signer side of a ROAST mailbox post:
/// dealer key generation, round-1 commitments, signing-package creation,
/// and construction of each signer's Nostr mailbox event via
/// `create_signer_event`. Prints the resulting event JSON; no network I/O.
pub fn simulate_frost_mailbox_post_signer() -> Result<(), Box<dyn std::error::Error>> {
use rand::thread_rng;
use std::collections::BTreeMap;
use frost_secp256k1_tr as frost;
// This example simulates a single signer's role in a ROAST mailbox post workflow.
// The general workflow is:
// 1. Coordinator sends a request for signatures (e.g., on a BIP-64MOD proposal).
// 2. Signers receive the proposal, perform local verification.
// 3. Each signer generates their signature share and posts it (encrypted) to a
// Nostr relay, targeting the coordinator's mailbox.
// 4. The coordinator collects enough shares to aggregate the final signature.
let mut rng = thread_rng();
// For this example, we simulate a 2-of-2 threshold for simplicity.
let (max_signers, min_signers) = (2, 2);
////////////////////////////////////////////////////////////////////////////
// 1. Key Generation (Simulated Trusted Dealer)
////////////////////////////////////////////////////////////////////////////
// In a real distributed setup, this would be DKG. Here, a "trusted dealer"
// generates the shares and public key package.
let (shares, _pubkey_package) = frost::keys::generate_with_dealer(
max_signers,
min_signers,
frost::keys::IdentifierList::Default,
&mut rng,
)?;
// For a 2-of-2 scheme, we have two signers. Let's pick signer 1.
let signer1_id = frost::Identifier::try_from(1 as u16)?;
let key_package1: frost::keys::KeyPackage = shares[&signer1_id].clone().try_into()?;
let signer2_id = frost::Identifier::try_from(2 as u16)?;
let key_package2: frost::keys::KeyPackage = shares[&signer2_id].clone().try_into()?;
// The message that is to be signed (e.g., a hash of a Git commit or a Nostr event ID).
let message = b"This is a test message for ROAST mailbox post.";
////////////////////////////////////////////////////////////////////////////
// 2. Round 1: Commitment Phase (Signer's role)
////////////////////////////////////////////////////////////////////////////
// Each signer generates nonces and commitments.
let (nonces1, comms1) = frost::round1::commit(key_package1.signing_share(), &mut rng);
let (nonces2, comms2) = frost::round1::commit(key_package2.signing_share(), &mut rng);
// The coordinator collects these commitments. Here, we simulate by putting them in a BTreeMap.
let mut session_commitments = BTreeMap::new();
session_commitments.insert(signer1_id, comms1);
session_commitments.insert(signer2_id, comms2);
////////////////////////////////////////////////////////////////////////////
// 3. Signing Package Creation (Coordinator's role, simulated for context)
////////////////////////////////////////////////////////////////////////////
// The coordinator combines the collected commitments and the message to be signed
// into a signing package, which is then sent back to the signers.
let signing_package = frost::SigningPackage::new(session_commitments, message);
// Dummy coordinator public key. In a real scenario, this would be the
// actual public key of the ROAST coordinator, used for event tagging
// and encryption (NIP-44).
let coordinator_pubkey_hex = "0000000000000000000000000000000000000000000000000000000000000001";
////////////////////////////////////////////////////////////////////////////
// 4. Create the Signer Event (Signer's role)
////////////////////////////////////////////////////////////////////////////
// We demonstrate for signer 1. Signer 2 would perform a similar action.
let event_json_signer1 = create_signer_event(
signer1_id,
&signing_package,
&nonces1,
&key_package1,
coordinator_pubkey_hex,
)?;
println!("Generated Nostr Event for Signer 1 Mailbox Post:
{}", event_json_signer1);
// Similarly, Signer 2 would generate their event:
let event_json_signer2 = create_signer_event(
signer2_id,
&signing_package,
&nonces2,
&key_package2,
coordinator_pubkey_hex,
)?;
println!("Generated Nostr Event for Signer 2 Mailbox Post:
{}", event_json_signer2);
Ok(())
}
[package]
name = "get_file_hash_core"
version = { workspace = true }
edition = { workspace = true }
description = { workspace = true }
license = { workspace = true }
documentation = { workspace = true }
homepage = { workspace = true }
repository = { workspace = true }
authors = { workspace = true }
[features]
nostr = ["dep:nostr", "dep:nostr-sdk", "dep:serde_json", "dep:sha2", "dep:hex", "dep:reqwest", "dep:tokio", "dep:csv", "dep:url", "dep:frost-secp256k1-tr", "dep:rand"]
[dependencies]
sha2 = { workspace = true, optional = true }
nostr = { workspace = true, optional = true }
serde_json = { workspace = true, optional = true }
nostr-sdk = { workspace = true, optional = true }
hex = { workspace = true, optional = true }
csv = { workspace = true, optional = true }
url = { workspace = true, optional = true }
frost-secp256k1-tr = { workspace = true, optional = true }
rand = { workspace = true, optional = true }
[dev-dependencies]
sha2 = { workspace = true }
tempfile = { workspace = true }
nostr = { workspace = true }
nostr-sdk = { workspace = true }
serde_json = { workspace = true }
hex = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
csv = { workspace = true }
url = { workspace = true }
frost-secp256k1-tr = { workspace = true }
serial_test = { workspace = true, features = ["test_logging"] }
log = { workspace = true }
[build-dependencies]
reqwest = { workspace = true, features = ["json"], optional = true }
tokio = { workspace = true, features = ["macros", "rt-multi-thread"], optional = true }
//! A simple command-line tool that calculates and displays the SHA-256 hash of
//! its own source file.
//!
//! This utility demonstrates how to use the `get_file_hash!` macro to obtain
//! the hash of a specified file at compile time and incorporate it into runtime
//! logic.
use get_file_hash_core::get_file_hash;
use sha2::{Digest, Sha256};
// First half of the generated README: project overview and macro usage.
// Uses `r##"..."##` because the body itself contains `#` headings and quotes.
const README_TEMPLATE_PART1: &str = r##"# `get_file_hash` macro
This project provides a Rust procedural macro, `get_file_hash!`, designed to compute the SHA-256 hash of a specified file at compile time. This hash is then embedded directly into your compiled executable. This feature is invaluable for:
* **Integrity Verification:** Ensuring the deployed code hasn't been tampered with.
* **Versioning:** Embedding a unique identifier linked to the exact source code version.
* **Cache Busting:** Generating unique names for assets based on their content.
## Project Structure
* `get_file_hash_core`: A foundational crate containing the `get_file_hash!` macro definition.
* `get_file_hash`: The main library crate that re-exports the macro.
* `src/bin/get_file_hash.rs`: An example executable demonstrating the macro's usage by hashing its own source file and updating this `README.md`.
* `build.rs`: A build script that also utilizes the `get_file_hash!` macro to hash `Cargo.toml` during the build process.
## Usage of `get_file_hash!` Macro
To use the `get_file_hash!` macro, ensure you have `get_file_hash` (or `get_file_hash_core` for direct usage) as a dependency in your `Cargo.toml`.
### Example
```rust
use get_file_hash::get_file_hash;
use sha2::{Digest, Sha256};
fn main() {
// The macro resolves the path relative to CARGO_MANIFEST_DIR
let file_hash = get_file_hash!("src/lib.rs");
println!("The SHA-256 hash of src/lib.rs is: {}", file_hash);
}
```
"##;
// Second half of the README: setup/run instructions. `main` appends the
// current hash lines after this template when regenerating README.md.
const README_TEMPLATE_PART2: &str = r"## Setup and Building
1. **Clone the repository:**
```bash
git clone <repository-url>
cd <repository-name>
```
2. **Build the project:**
```bash
cargo build
```
During the build, `build.rs` will execute and print the hash of `Cargo.toml`.
3. **Run the example executable:**
```bash
cargo run --bin get_file_hash
```
This will print the hash of `src/bin/get_file_hash.rs` to your console.
## Updating this `README.md`
The hash information in this `README.md` is automatically generated by running the example executable.
To update it, execute:
```bash
cargo run --bin get_file_hash > README.md
```
## Current File Hash Information (of `src/bin/get_file_hash.rs`)
* **Target File:** `src/bin/get_file_hash.rs`
";
/// The main entry point of the application.
///
/// Prints the README template with the compile-time SHA-256 hash of this
/// source file appended, plus a basic integrity status line. Intended to be
/// redirected into `README.md` (see the template text).
fn main() {
    // `get_file_hash!` expands at compile time to the SHA-256 hex digest of
    // the named file (resolved relative to CARGO_MANIFEST_DIR).
    let self_hash = get_file_hash!("get_file_hash.rs");
    // "e3b0c442..." is the SHA-256 digest of empty input, so a hash starting
    // with "e3b0" almost certainly means the file was read as empty.
    let status_message = if self_hash.starts_with("e3b0") {
        "Warning: This hash represents an empty file."
    } else {
        "Integrity Verified."
    };
    print!("{}{}", README_TEMPLATE_PART1, README_TEMPLATE_PART2);
    println!("* **SHA-256 Hash:** `{}`", self_hash);
    // Both status messages already end with a period, so the format string
    // must not append another one (the original printed "Verified..").
    println!("* **Status:** {}\n", status_message);
}
#[cfg(feature = "nostr")]
use frost_secp256k1_tr as frost;
#[cfg(feature = "nostr")]
use rand::thread_rng;
#[cfg(feature = "nostr")]
use std::collections::BTreeMap;
#[cfg(feature = "nostr")]
/// Demonstrates a 2-of-3 FROST threshold signature using a trusted dealer:
/// key generation, commitment, signing, aggregation, and verification.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut rng = thread_rng();
    // Threshold parameters: any 2 of the 3 share holders can sign.
    // Annotated u16 so the loop index below needs no cast.
    let max_signers: u16 = 3;
    let min_signers: u16 = 2;
    ////////////////////////////////////////////////////////////////////////////
    // Round 0: Key Generation (Trusted Dealer)
    ////////////////////////////////////////////////////////////////////////////
    // In a real P2P setup, you'd use Distributed Key Generation (DKG).
    // For local testing/simulations, the trusted dealer is faster.
    let (shares, pubkey_package) = frost::keys::generate_with_dealer(
        max_signers,
        min_signers,
        frost::keys::IdentifierList::Default,
        &mut rng,
    )?;
    // Verifying the public key exists
    let group_public_key = pubkey_package.verifying_key();
    println!("Group Public Key: {:?}", group_public_key);
    ////////////////////////////////////////////////////////////////////////////
    // Round 1: Commitment
    ////////////////////////////////////////////////////////////////////////////
    let message = b"BIP-64MOD Consensus Proposal";
    let mut signing_commitments = BTreeMap::new();
    let mut participant_nonces = BTreeMap::new();
    // Participants 1 and 2 decide to sign
    for i in 1..=min_signers {
        // `i` is already u16, so no `as` cast is needed for the Identifier.
        let identifier = frost::Identifier::try_from(i)?;
        // Generate nonces and commitments
        let (nonces, commitments) = frost::round1::commit(
            shares[&identifier].signing_share(),
            &mut rng,
        );
        signing_commitments.insert(identifier, commitments);
        participant_nonces.insert(identifier, nonces);
    }
    ////////////////////////////////////////////////////////////////////////////
    // Round 2: Signing
    ////////////////////////////////////////////////////////////////////////////
    let mut signature_shares = BTreeMap::new();
    let signing_package = frost::SigningPackage::new(signing_commitments, message);
    for i in 1..=min_signers {
        let identifier = frost::Identifier::try_from(i)?;
        let nonces = &participant_nonces[&identifier];
        // Each participant produces a signature share
        let key_package: frost::keys::KeyPackage = shares[&identifier].clone().try_into()?;
        let share = frost::round2::sign(&signing_package, nonces, &key_package)?;
        signature_shares.insert(identifier, share);
    }
    ////////////////////////////////////////////////////////////////////////////
    // Finalization: Aggregation
    ////////////////////////////////////////////////////////////////////////////
    let group_signature = frost::aggregate(
        &signing_package,
        &signature_shares,
        &pubkey_package,
    )?;
    // Verification
    group_public_key.verify(message, &group_signature)?;
    println!("Threshold signature verified successfully!");
    Ok(())
}
#[cfg(not(feature = "nostr"))]
fn main() {
    // Stub entry point: tells the user how to run the real example.
    let hint = "This example requires the 'nostr' feature. Please run with: cargo run --example trusted-dealer --features nostr";
    println!("{}", hint);
}
#[cfg(feature = "nostr")]
use frost_secp256k1_tr as frost;
#[cfg(feature = "nostr")]
use rand::thread_rng;
#[cfg(feature = "nostr")]
use std::collections::BTreeMap;
/// A simplified ROAST Coordinator that manages signing sessions
#[cfg(feature = "nostr")]
struct RoastCoordinator {
    // Threshold: number of commitments/shares needed before proceeding.
    min_signers: u16,
    // Message being signed; stored for session context (not read yet).
    _message: Vec<u8>,
    // Round-1 commitments collected from responsive signers.
    commitments: BTreeMap<frost::Identifier, frost::round1::SigningCommitments>,
    // Signers' secret nonces. Held here only because this simulation plays
    // both coordinator and signers; real signers keep nonces local.
    nonces: BTreeMap<frost::Identifier, frost::round1::SigningNonces>,
    // Round-2 signature shares collected from signers.
    shares: BTreeMap<frost::Identifier, frost::round2::SignatureShare>,
}
#[cfg(feature = "nostr")]
impl RoastCoordinator {
    /// Creates a coordinator for a session over `message` that needs
    /// `min_signers` participants before it can sign and aggregate.
    fn new(min_signers: u16, message: &[u8]) -> Self {
        RoastCoordinator {
            min_signers,
            _message: message.to_vec(),
            commitments: BTreeMap::new(),
            nonces: BTreeMap::new(),
            shares: BTreeMap::new(),
        }
    }
    /// ROAST Logic: Collect commitments until we hit the threshold.
    /// In a real P2P system, this would be an async stream handler.
    fn add_commitment(&mut self, id: frost::Identifier, comms: frost::round1::SigningCommitments, nonces: frost::round1::SigningNonces) {
        let threshold = usize::from(self.min_signers);
        if self.commitments.len() < threshold {
            self.commitments.insert(id, comms);
            self.nonces.insert(id, nonces);
        }
    }
    /// ROAST Logic: Collect signature shares, ignoring extras past the threshold.
    fn add_share(&mut self, id: frost::Identifier, share: frost::round2::SignatureShare) {
        let threshold = usize::from(self.min_signers);
        if self.shares.len() < threshold {
            self.shares.insert(id, share);
        }
    }
    /// True once enough commitments have arrived to build a signing package.
    fn is_ready_to_sign(&self) -> bool {
        usize::from(self.min_signers) <= self.commitments.len()
    }
    /// True once enough signature shares have arrived to aggregate.
    fn is_ready_to_aggregate(&self) -> bool {
        usize::from(self.min_signers) <= self.shares.len()
    }
}
#[cfg(feature = "nostr")]
/// Simulates ROAST coordination for a 3-of-5 FROST signing session where
/// only signers 1, 3 and 5 respond; the signature still aggregates.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut rng = thread_rng();
    let (max_signers, min_signers) = (5, 3);
    let message = b"BIP-64MOD Context: ROAST Coordination";
    // 1. Setup: Generate keys (Dealer mode for simulation)
    let (key_shares, pubkey_package) = frost::keys::generate_with_dealer(
        max_signers,
        min_signers,
        frost::keys::IdentifierList::Default,
        &mut rng,
    )?;
    let mut coordinator = RoastCoordinator::new(min_signers, message);
    // 2. Round 1: Asynchronous Commitment Collection
    // Simulate signers 1, 3, and 5 responding first (ROAST skips 2 and 4).
    // Literal u16s so Identifier::try_from needs no cast.
    for &id_num in &[1u16, 3, 5] {
        let id = frost::Identifier::try_from(id_num)?;
        let (nonces, comms) = frost::round1::commit(key_shares[&id].signing_share(), &mut rng);
        // Signers store their nonces locally, send comms to coordinator
        coordinator.add_commitment(id, comms, nonces);
        // Note: Signer 2 was "offline", but ROAST doesn't care because we hit 3/5.
    }
    // 3. Round 2: Signing
    if coordinator.is_ready_to_sign() {
        let signing_package = frost::SigningPackage::new(coordinator.commitments.clone(), message);
        let mut temp_shares = BTreeMap::new();
        for &id in coordinator.commitments.keys() {
            // In reality, coordinator sends signing_package to signers
            // Here we simulate the signers producing shares
            let nonces = &coordinator.nonces[&id];
            let key_package: frost::keys::KeyPackage = key_shares[&id].clone().try_into()?;
            // `nonces` is already a reference; no extra `&` needed.
            let share = frost::round2::sign(&signing_package, nonces, &key_package)?;
            temp_shares.insert(id, share);
        }
        for (id, share) in temp_shares {
            coordinator.add_share(id, share);
        }
    }
    // 4. Finalization: Aggregation
    if coordinator.is_ready_to_aggregate() {
        let signing_package = frost::SigningPackage::new(coordinator.commitments.clone(), message);
        let group_signature = frost::aggregate(
            &signing_package,
            &coordinator.shares,
            &pubkey_package,
        )?;
        pubkey_package.verifying_key().verify(message, &group_signature)?;
        println!("ROAST-coordinated signature verified!");
    }
    Ok(())
}
#[cfg(not(feature = "nostr"))]
fn main() {
    // Stub entry point: tells the user how to run the real example.
    let hint = "This example requires the 'nostr' feature. Please run with: cargo run --example roast-experiment --features nostr";
    println!("{}", hint);
}
// `#[cfg]` is listed first so the whole item (including the `#[tokio::main]`
// expansion) is compiled out when the `nostr` feature is disabled.
#[cfg(feature = "nostr")]
#[allow(unused_imports)]
#[tokio::main]
/// Publishes two example repository announcements (with and without a
/// build-manifest event id) using freshly generated throwaway keys.
async fn main() {
    use get_file_hash_core::repository_announcement;
    use get_file_hash_core::get_file_hash;
    use nostr_sdk::Keys;
    use sha2::{Digest, Sha256};
    use nostr_sdk::EventId;
    use std::str::FromStr;
    let keys = Keys::generate();
    let relay_urls = get_file_hash_core::get_relay_urls();
    let project_name = "my-awesome-repo-example";
    let description = "A fantastic new project example.";
    let clone_url = "git@github.com:user/my-awesome-repo-example.git";
    // Dummy EventId for examples that require a build_manifest_event_id
    const DUMMY_BUILD_MANIFEST_ID_STR: &str = "f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0";
    let dummy_build_manifest_id = EventId::from_str(DUMMY_BUILD_MANIFEST_ID_STR).unwrap();
    // Example 1: Without build_manifest_event_id
    println!("Publishing repository announcement without build_manifest_event_id...");
    repository_announcement!(
        &keys,
        &relay_urls,
        project_name,
        description,
        clone_url,
        "../Cargo.toml" // Use a known file in your project
    );
    println!("Repository announcement without build_manifest_event_id published.");
    // Example 2: With build_manifest_event_id
    println!("Publishing repository announcement with build_manifest_event_id...");
    repository_announcement!(
        &keys,
        &relay_urls,
        project_name,
        description,
        clone_url,
        "../Cargo.toml", // Use a known file in your project
        Some(&dummy_build_manifest_id)
    );
    println!("Repository announcement with build_manifest_event_id published.");
}
#[cfg(not(feature = "nostr"))]
fn main() {
    // Stub entry point: tells the user how to run the real example.
    let hint = "This example requires the 'nostr' feature. Please run with: cargo run --example repository_announcement --features nostr";
    println!("{}", hint);
}
// `#[cfg]` is listed first so the whole item (including the `#[tokio::main]`
// expansion) is compiled out when the `nostr` feature is disabled.
#[cfg(feature = "nostr")]
#[tokio::main]
/// Publishes an example repository-state event (branch head) using freshly
/// generated throwaway keys.
async fn main() {
    use get_file_hash_core::publish_repository_state;
    use nostr_sdk::Keys;
    let keys = Keys::generate();
    let relay_urls = get_file_hash_core::get_relay_urls();
    let d_tag = "my-awesome-repo-example";
    let branch_name = "main";
    let commit_id = "a1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6e7f8a9b0";
    println!("Publishing repository state...");
    publish_repository_state!(
        &keys,
        &relay_urls,
        d_tag,
        branch_name,
        commit_id
    );
    println!("Repository state published.");
}
#[cfg(not(feature = "nostr"))]
fn main() {
    // Stub entry point: tells the user how to run the real example.
    let hint = "This example requires the 'nostr' feature. Please run with: cargo run --example publish_repository_state --features nostr";
    println!("{}", hint);
}
// `#[cfg]` is listed first so the whole item (including the `#[tokio::main]`
// expansion) is compiled out when the `nostr` feature is disabled.
#[cfg(feature = "nostr")]
#[tokio::main]
/// Publishes two example PR-update events (with and without a
/// build-manifest event id) using freshly generated throwaway keys.
async fn main() {
    use get_file_hash_core::publish_pr_update;
    use nostr_sdk::Keys;
    use nostr_sdk::EventId;
    use std::str::FromStr;
    let keys = Keys::generate();
    let relay_urls = get_file_hash_core::get_relay_urls();
    let d_tag = "my-awesome-repo-example";
    let pr_event_id = EventId::from_str("f6e4d6a1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6e7f8a9b0c1d2e3f4a5b6c7d8e9").unwrap(); // Example PR Event ID
    let updated_commit_id = "z9y8x7w6v5u4t3s2r1q0p9o8n7m6l5k4j3i2h1g0";
    let updated_clone_url = "git@github.com:user/my-feature-branch-v2.git";
    // Dummy EventId for examples that require a build_manifest_event_id
    const DUMMY_BUILD_MANIFEST_ID_STR: &str = "f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0";
    let dummy_build_manifest_id = EventId::from_str(DUMMY_BUILD_MANIFEST_ID_STR).unwrap();
    // Example 1: Without build_manifest_event_id
    println!("Publishing PR update without build_manifest_event_id...");
    publish_pr_update!(
        &keys,
        &relay_urls,
        d_tag,
        &pr_event_id,
        updated_commit_id,
        updated_clone_url
    );
    println!("PR update without build_manifest_event_id published.");
    // Example 2: With build_manifest_event_id
    println!("Publishing PR update with build_manifest_event_id...");
    publish_pr_update!(
        &keys,
        &relay_urls,
        d_tag,
        &pr_event_id,
        updated_commit_id,
        updated_clone_url,
        Some(&dummy_build_manifest_id)
    );
    println!("PR update with build_manifest_event_id published.");
}
#[cfg(not(feature = "nostr"))]
fn main() {
    // Stub entry point: tells the user how to run the real example.
    let hint = "This example requires the 'nostr' feature. Please run with: cargo run --example publish_pr_update --features nostr";
    println!("{}", hint);
}
// `#[cfg]` is listed first so the whole item (including the `#[tokio::main]`
// expansion) is compiled out when the `nostr` feature is disabled.
#[cfg(feature = "nostr")]
#[tokio::main]
/// Publishes two example issue events (with and without a build-manifest
/// event id) using freshly generated throwaway keys.
async fn main() {
    use get_file_hash_core::publish_issue;
    use nostr_sdk::Keys;
    use nostr_sdk::EventId;
    use std::str::FromStr;
    let keys = Keys::generate();
    let relay_urls = get_file_hash_core::get_relay_urls();
    let d_tag = "my-awesome-repo-example";
    let issue_id = "123";
    let title = "Bug: Fix authentication flow example";
    let content = "The authentication flow is currently broken when users try to log in with invalid credentials. It crashes instead of showing an error message.";
    // Dummy EventId for examples that require a build_manifest_event_id
    const DUMMY_BUILD_MANIFEST_ID_STR: &str = "f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0";
    let dummy_build_manifest_id = EventId::from_str(DUMMY_BUILD_MANIFEST_ID_STR).unwrap();
    // Example 1: Without build_manifest_event_id
    println!("Publishing issue without build_manifest_event_id...");
    publish_issue!(
        &keys,
        &relay_urls,
        d_tag,
        issue_id,
        title,
        content
    );
    println!("Issue without build_manifest_event_id published.");
    // Example 2: With build_manifest_event_id
    println!("Publishing issue with build_manifest_event_id...");
    publish_issue!(
        &keys,
        &relay_urls,
        d_tag,
        issue_id,
        title,
        content,
        Some(&dummy_build_manifest_id)
    );
    println!("Issue with build_manifest_event_id published.");
}
#[cfg(not(feature = "nostr"))]
fn main() {
    // Stub entry point: tells the user how to run the real example.
    let hint = "This example requires the 'nostr' feature. Please run with: cargo run --example publish_issue --features nostr";
    println!("{}", hint);
}
#[cfg(feature = "nostr")]
// Thin wrapper: the signer-side FROST mailbox simulation lives in the core
// library; any error it returns propagates out of main.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    get_file_hash_core::frost_mailbox_logic::simulate_frost_mailbox_post_signer()
}
#[cfg(not(feature = "nostr"))]
fn main() {
    // Stub entry point: tells the user how to run the real example.
    let hint = "This example requires the 'nostr' feature. Please run with: cargo run --example frost_mailbox_post --features nostr";
    println!("{}", hint);
}
#[cfg(feature = "nostr")]
// Thin wrapper: the coordinator-side FROST mailbox simulation lives in the
// core library; any error it returns propagates out of main.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    get_file_hash_core::frost_mailbox_logic::simulate_frost_mailbox_coordinator()
}
#[cfg(not(feature = "nostr"))]
fn main() {
println!("This example requires the 'nostr' feature. Please run with: cargo run --example frost_mailbox --features nostr");
}
#[cfg(feature = "nostr")]
use frost_secp256k1_tr as frost; // MUST use the -tr variant for BIP-340/Nostr
#[cfg(feature = "nostr")]
use rand::thread_rng;
#[cfg(feature = "nostr")]
use serde_json::json;
#[cfg(feature = "nostr")]
use sha2::{Digest, Sha256};
#[cfg(feature = "nostr")]
use std::collections::BTreeMap;
#[cfg(feature = "nostr")]
use hex;
#[cfg(feature = "nostr")]
/// Signs a NIP-01 Nostr event id with a 2-of-3 FROST/ROAST threshold
/// signature (BIP-340 compatible) and verifies the aggregate.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut rng = thread_rng();
    let (max_signers, min_signers) = (3, 2);
    // 1. Setup Nostr Event Metadata
    let pubkey_hex = "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"; // Example
    // Fixed timestamp keeps the example's event id deterministic.
    let created_at = 1712050000;
    let kind = 1;
    let content = "Hello from ROAST threshold signatures!";
    // 2. Serialize for Nostr ID (per NIP-01)
    let event_json = json!([
        0,
        pubkey_hex,
        created_at,
        kind,
        [],
        content
    ]).to_string();
    let mut hasher = Sha256::new();
    hasher.update(event_json.as_bytes());
    let event_id = hasher.finalize(); // This 32-byte hash is our signing message
    // 3. FROST/ROAST Key Generation
    let (shares, pubkey_package) = frost::keys::generate_with_dealer(
        max_signers,
        min_signers,
        frost::keys::IdentifierList::Default,
        &mut rng,
    )?;
    // 4. ROAST Coordination Simulation (Round 1: Commitments)
    // In ROAST, the coordinator keeps a "session" open and collects commitments
    let mut session_commitments = BTreeMap::new();
    let mut signer_nonces = BTreeMap::new();
    // Signers 1 and 3 respond first (Signer 2 is offline/slow).
    // Literal u16s so Identifier::try_from needs no cast.
    for &id_val in &[1u16, 3] {
        let id = frost::Identifier::try_from(id_val)?;
        let (nonces, comms) = frost::round1::commit(shares[&id].signing_share(), &mut rng);
        session_commitments.insert(id, comms);
        signer_nonces.insert(id, nonces);
    }
    // 5. Round 2: Signing the Nostr ID
    let signing_package = frost::SigningPackage::new(session_commitments, &event_id);
    let mut signature_shares = BTreeMap::new();
    for (id, nonces) in signer_nonces {
        let key_package: frost::keys::KeyPackage = shares[&id].clone().try_into()?;
        let share = frost::round2::sign(&signing_package, &nonces, &key_package)?;
        signature_shares.insert(id, share);
    }
    // 6. Aggregate into a BIP-340 Signature
    let group_signature = frost::aggregate(
        &signing_package,
        &signature_shares,
        &pubkey_package,
    )?;
    // 7. Verification (using BIP-340 logic)
    pubkey_package.verifying_key().verify(&event_id, &group_signature)?;
    println!("Nostr Event ID: {}", hex::encode(event_id));
    println!("Threshold Signature (BIP-340): {}", hex::encode(group_signature.serialize()?));
    println!("Successfully signed Nostr event using ROAST/FROST!");
    Ok(())
}
#[cfg(not(feature = "nostr"))]
fn main() {
    // Stub entry point: tells the user how to run the real example.
    let hint = "This example requires the 'nostr' feature. Please run with: cargo run --example frost_bip_340 --features nostr";
    println!("{}", hint);
}
# dist plan --output-format=json > plan-dist-manifest.json
# Config for 'dist'
[workspace]
members = ["cargo:.", "cargo:src/get_file_hash_core"]
# Config for 'dist'
[dist]
# The preferred dist version to use in CI (Cargo.toml SemVer syntax)
cargo-dist-version = "0.30.3"
# CI backends to support
ci = "github"
# The installers to generate for each app
installers = ["shell", "powershell", "homebrew", "msi"]
# A GitHub repo to push Homebrew formulas to
tap = "gnostr-org/homebrew-gnostr-org"
# Path that installers should place binaries in
install-path = "CARGO_HOME"
# Publish jobs to run in CI
publish-jobs = ["homebrew"]
# Whether to install an updater program
install-updater = true
# Target platforms to build apps for (Rust target-triple syntax)
targets = ["aarch64-apple-darwin", "aarch64-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-unknown-linux-gnu", "x86_64-unknown-linux-musl", "x86_64-pc-windows-msvc"]
# Skip checking whether the specified configuration files are up to date
allow-dirty = ["ci"]
// deterministic nostr event build example
use get_file_hash_core::get_file_hash;
#[cfg(all(not(debug_assertions), feature = "nostr"))]
use get_file_hash_core::{get_git_tracked_files, DEFAULT_GNOSTR_KEY, DEFAULT_PICTURE_URL, DEFAULT_BANNER_URL};
#[cfg(all(not(debug_assertions), feature = "nostr"))]
use nostr_sdk::{EventBuilder, Keys, EventId, Tag, SecretKey, JsonUtil, Kind, Event};
#[cfg(all(not(debug_assertions), feature = "nostr"))]
use serde_json::to_string;
#[cfg(all(not(debug_assertions), feature = "nostr"))]
use std::fs;
use std::path::PathBuf;
use sha2::{Digest, Sha256};
#[cfg(all(not(debug_assertions), feature = "nostr"))]
use ::hex;
#[cfg(all(not(debug_assertions), feature = "nostr"))]
use std::io::Write;
#[cfg(all(not(debug_assertions), feature = "nostr"))]
/// Returns true when `error_msg` indicates the relay is unusable for this
/// publish run and should be dropped from the client's relay pool.
fn should_remove_relay(error_msg: &str) -> bool {
    // Substrings that identify a relay as unreachable, rejecting, or
    // redundant for the current event.
    const REMOVABLE_ERRORS: [&str; 7] = [
        "relay not connected",
        "not in web of trust",
        "blocked: not authorized",
        "timeout",
        "blocked: spam not permitted",
        "relay experienced an error trying to publish the latest event",
        "duplicate: event already broadcast",
    ];
    REMOVABLE_ERRORS.iter().any(|needle| error_msg.contains(needle))
}
#[cfg(all(not(debug_assertions), feature = "nostr"))]
/// Writes `event` as JSON to `output_dir/filename`, creating any missing
/// parent directories first.
///
/// Returns `Some(())` on success, `None` on any I/O failure. Failures are
/// reported as `cargo:warning=` lines so they are visible in cargo's build
/// output (this runs from the build script).
fn write_event_json_to_file(
    output_dir: &PathBuf,
    filename: &str,
    event: &Event,
) -> Option<()> {
    let file_path = output_dir.join(filename);
    // `filename` may itself contain subdirectories (e.g. "kind/hash/pk/id.json"),
    // so ensure the whole parent chain exists before creating the file.
    if let Some(parent) = file_path.parent() {
        if let Err(e) = fs::create_dir_all(parent) {
            println!("cargo:warning=Failed to create parent directories for {}: {}", file_path.display(), e);
            return None;
        }
    }
    if let Err(e) = fs::File::create(&file_path).and_then(|mut file| write!(file, "{}", event.as_json())) {
        // Use the cargo:warning= prefix like the error path above, so the
        // failure is not swallowed by cargo's normal stdout filtering.
        println!("cargo:warning=Failed to write event JSON to file {}: {}", file_path.display(), e);
        None
    } else {
        println!("Successfully wrote event JSON to {}", file_path.display());
        Some(())
    }
}
#[cfg(all(not(debug_assertions), feature = "nostr"))]
/// Signs `event_builder` with the client's signer, sends it to all connected
/// relays, archives the signed event JSON under `output_dir`, and accumulates
/// the bytes sent into `total_bytes_sent`.
///
/// Relays that fail with a "permanent" error (see `should_remove_relay`) are
/// removed from the client so later events skip them. Returns the event id on
/// send success, `None` otherwise.
///
/// NOTE(review): `relay_urls` is currently unused here (relays are managed on
/// the client itself); kept for interface stability with callers.
async fn publish_nostr_event_if_release(
    client: &mut nostr_sdk::Client,
    hash: String,
    keys: Keys,
    event_builder: EventBuilder,
    relay_urls: &mut Vec<String>,
    file_path_str: &str,
    output_dir: &PathBuf,
    total_bytes_sent: &mut usize,
) -> Option<EventId> {
    let public_key = keys.public_key().to_string();
    // Build-script context: an unsignable event is a hard error, so unwrap.
    let event = client.sign_event_builder(event_builder).await.unwrap();
    match client.send_event(&event).await {
        Ok(event_output) => {
            println!("cargo:warning=Published Nostr event for {}: {}", file_path_str, event_output.val);
            // Size of the serialized event, used for per-relay accounting.
            let event_json_size = to_string(&event).map(|s| s.as_bytes().len()).unwrap_or(0);
            // Print successful relays
            for relay_url in event_output.success.iter() {
                println!("cargo:warning=Successfully published to relay: {} ({} bytes)", relay_url, event_json_size);
                *total_bytes_sent += event_json_size;
            }
            // Print failed relays and remove "unfriendly" relays from the list
            for (relay_url, error_msg) in event_output.failed.iter() {
                if should_remove_relay(error_msg) {
                    if let Err(e) = client.remove_relay(relay_url).await {
                        println!("cargo:warning=Failed to remove relay {}: {}", relay_url, e);
                    }
                    // println!("cargo:warning=Removed relay {}", relay_url);
                }
            }
            // Archive path: <file>/<hash>/<pubkey>/<event-id>.json
            // (`public_key` is only formatted here, so no clone is needed).
            let filename = format!("{}/{}/{}/{}.json", file_path_str, hash, public_key, event_output.val.to_string());
            write_event_json_to_file(output_dir, &filename, &event);
            Some(event_output.val)
        },
        Err(e) => {
            println!("cargo:warning=Failed to publish Nostr event for {}: {}", file_path_str, e);
            None
        },
    }
}
#[cfg(all(not(debug_assertions), feature = "nostr"))]
/// Builds and publishes a repository announcement event (kind 30617, the
/// NIP-34 repo announcement kind) tagged with the repo's name, description,
/// URLs, commit, branch, and relay list, then archives the signed event JSON
/// under `output_dir`.
///
/// Returns the published event id, or `None` if sending failed.
/// NOTE(review): the `Tag::parse(...).unwrap()` calls assume these static tag
/// shapes always parse — confirm against the nostr_sdk version in use.
pub async fn get_repo_announcement_event(
    client: &mut nostr_sdk::Client,
    _keys: &Keys,
    relay_urls: &Vec<String>,
    repo_url: &str,
    repo_name: &str,
    repo_description: &str,
    git_commit_hash: &str,
    git_branch: &str,
    output_dir: &PathBuf,
    public_key_hex: &str,
) -> Option<EventId> {
    let mut tags = vec![
        Tag::parse(["d", repo_name].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["name", repo_name].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["description", repo_description].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["web", repo_url].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["clone", repo_url].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        // "euc" marks the earliest-unique-commit reference per NIP-34.
        Tag::parse(["r", git_commit_hash, "euc"].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["commit", git_commit_hash].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["branch", git_branch].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["maintainers", "gnostr"].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        //Tag::parse(["t", "personal-fork"].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["t", "gnostr"].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["t", repo_name].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
    ];
    // Append each relay url
    for relay in relay_urls {
        tags.push(Tag::parse(["relays", relay].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap());
    }
    let event_builder = EventBuilder::new(Kind::Custom(30617), repo_description).tags(tags);
    let event = client.sign_event_builder(event_builder).await.unwrap();
    match client.send_event(&event).await {
        Ok(event_output) => {
            println!("cargo:warning=Published Nostr Repository Announcement for {}: {}", repo_name, event_output.val);
            // Archive path: 30617/<repo>/<pubkey>/<event-id>.json
            let filename = format!("30617/{}/{}/{}.json", repo_name, public_key_hex, event_output.val.to_string());
            write_event_json_to_file(output_dir, &filename, &event);
            Some(event_output.val)
        },
        Err(e) => {
            println!("cargo:warning=Failed to publish Nostr Repository Announcement for {}: {}", repo_name, e);
            None
        },
    }
}
#[cfg(all(not(debug_assertions), feature = "nostr"))]
/// Builds and publishes a repository patch event (kind 1617, the NIP-34 patch
/// kind) tagged with the repo's URL, name, description, commit, and branch,
/// then archives the signed event JSON under `output_dir`.
///
/// Returns the published event id, or `None` if sending failed.
pub async fn get_repo_patch_event(
    client: &mut nostr_sdk::Client,
    _keys: &Keys,
    _relay_urls: &Vec<String>,
    repo_url: &str,
    repo_name: &str,
    repo_description: &str,
    git_commit_hash: &str,
    git_branch: &str,
    output_dir: &PathBuf,
    public_key_hex: &str,
) -> Option<EventId> {
    let tags = vec![
        Tag::parse(["r", repo_url].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["name", repo_name].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["description", repo_description].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["commit", git_commit_hash].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
        Tag::parse(["branch", git_branch].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
    ];
    // Kind 1617 is the NIP-34 patch kind (30617 is the announcement kind).
    let event_builder = EventBuilder::new(Kind::Custom(1617), repo_description).tags(tags);
    let event = client.sign_event_builder(event_builder).await.unwrap();
    match client.send_event(&event).await {
        Ok(event_output) => {
            // Log and archive under the kind actually published (1617); the
            // previous "30617"/"Announcement" wording was a copy-paste error.
            println!("cargo:warning=Published Nostr Repository Patch for {}: {}", repo_name, event_output.val);
            let filename = format!("1617/{}/{}/{}.json", repo_name, public_key_hex, event_output.val.to_string());
            write_event_json_to_file(output_dir, &filename, &event);
            Some(event_output.val)
        },
        Err(e) => {
            println!("cargo:warning=Failed to publish Nostr Repository Patch for {}: {}", repo_name, e);
            None
        },
    }
}
/// Runs `git <args>` in the current directory and returns trimmed stdout.
///
/// On spawn failure, non-zero exit status, or empty output, a
/// `cargo:warning` is emitted and an empty string is returned so the build
/// can continue with blank git metadata (e.g. on machines without git).
fn git_output(args: &[&str], what: &str) -> String {
    match std::process::Command::new("git")
        .args(args)
        .stdout(std::process::Stdio::piped())
        .stderr(std::process::Stdio::piped())
        .output()
    {
        Ok(output) if output.status.success() && !output.stdout.is_empty() => {
            String::from_utf8_lossy(&output.stdout).trim().to_string()
        }
        Ok(output) => {
            println!(
                "cargo:warning=Git {} command failed or returned empty. Status: {:?}, Stderr: {}",
                what,
                output.status,
                String::from_utf8_lossy(&output.stderr)
            );
            String::new()
        }
        Err(e) => {
            println!("cargo:warning=Failed to execute git command for {}: {}", what, e);
            String::new()
        }
    }
}
/// Cargo build script entry point.
///
/// * Injects package metadata, git metadata, and file content hashes into
///   the compiled crate via `cargo:rustc-env` directives.
/// * Registers `cargo:rerun-if-changed` triggers for the key inputs.
/// * In release builds with the `nostr` feature, publishes each git-tracked
///   file as a Nostr event, a build-manifest event linking them, and a
///   NIP-34 repository announcement.
#[tokio::main]
async fn main() {
    let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
    let is_git_repo = std::path::Path::new(&manifest_dir).join(".git").exists();
    println!("cargo:rustc-env=CARGO_PKG_NAME={}", env!("CARGO_PKG_NAME"));
    println!("cargo:rustc-env=CARGO_PKG_VERSION={}", env!("CARGO_PKG_VERSION"));
    // NOTE: `cargo:rustc-env` only sets environment variables for the crate
    // being compiled, NOT for this build-script process. The git values must
    // therefore be kept in locals for use later in this function — reading
    // them back with `std::env::var("GIT_COMMIT_HASH")` would always fail.
    let (git_commit_hash, git_branch) = if is_git_repo {
        (
            git_output(&["rev-parse", "HEAD"], "commit hash"),
            git_output(&["rev-parse", "--abbrev-ref", "HEAD"], "branch"),
        )
    } else {
        (String::new(), String::new())
    };
    println!("cargo:rustc-env=GIT_COMMIT_HASH={}", git_commit_hash);
    println!("cargo:rustc-env=GIT_BRANCH={}", git_branch);
    // `rerun-if-changed` on a missing path makes cargo re-run the script on
    // every build, so only register `.git/HEAD` when it actually exists.
    if std::path::Path::new(&manifest_dir).join(".git/HEAD").exists() {
        println!("cargo:rerun-if-changed=.git/HEAD");
    }
    // SHA-256 content hashes of the key inputs, exposed at compile time.
    let cargo_toml_hash = get_file_hash!("Cargo.toml");
    println!("cargo:rustc-env=CARGO_TOML_HASH={}", cargo_toml_hash);
    let lib_hash = get_file_hash!("src/lib.rs");
    println!("cargo:rustc-env=LIB_HASH={}", lib_hash);
    let build_hash = get_file_hash!("build.rs");
    println!("cargo:rustc-env=BUILD_HASH={}", build_hash);
    println!("cargo:rerun-if-changed=Cargo.toml");
    println!("cargo:rerun-if-changed=src/lib.rs");
    println!("cargo:rerun-if-changed=build.rs");
    let online_relays_csv_path =
        PathBuf::from(&manifest_dir).join("src/get_file_hash_core/src/online_relays_gps.csv");
    if online_relays_csv_path.exists() {
        println!("cargo:rerun-if-changed={}", online_relays_csv_path.display());
    }
    #[cfg(all(not(debug_assertions), feature = "nostr"))]
    // The `cfg!` check is redundant with the attribute above but kept as a
    // belt-and-braces guard for the network-touching release-only path.
    if cfg!(not(debug_assertions)) {
        println!("cargo:warning=Nostr feature enabled: Build may take longer due to network operations (publishing events to relays).");
        let package_version = std::env::var("CARGO_PKG_VERSION").unwrap();
        // Local record of everything published during this build.
        let output_dir = PathBuf::from(format!(".gnostr/build/{}", package_version));
        if let Err(e) = fs::create_dir_all(&output_dir) {
            println!("cargo:warning=Failed to create output directory {}: {}", output_dir.display(), e);
        }
        let files_to_publish: Vec<String> = get_git_tracked_files(&PathBuf::from(&manifest_dir));
        // One client is created up front and reused for the whole session.
        let initial_keys = Keys::new(
            SecretKey::from_hex(&hex::encode(Sha256::digest("initial_seed".as_bytes())))
                .expect("Failed to create initial Nostr keys"),
        );
        let mut client = nostr_sdk::Client::new(initial_keys.clone());
        let mut relay_urls = get_file_hash_core::get_relay_urls();
        for relay_url in relay_urls.iter() {
            if let Err(e) = client.add_relay(relay_url).await {
                println!("cargo:warning=Failed to add relay {}: {}", relay_url, e);
            }
        }
        client.connect().await;
        println!("cargo:warning=Added and connected to {} relays.", relay_urls.len());
        let mut published_event_ids: Vec<Tag> = Vec::new();
        let mut total_bytes_sent: usize = 0;
        for file_path_str in &files_to_publish {
            println!("cargo:warning=Processing file: {}", file_path_str);
            match fs::read(file_path_str) {
                Ok(bytes) => {
                    // The file's SHA-256 doubles as a deterministic Nostr
                    // secret key, so republishing identical content yields
                    // the same event identity.
                    let file_hash_hex = hex::encode(Sha256::digest(&bytes));
                    match SecretKey::from_hex(&file_hash_hex) {
                        Ok(secret_key) => {
                            let keys = Keys::new(secret_key);
                            let content = String::from_utf8_lossy(&bytes).into_owned();
                            let tags = vec![
                                Tag::parse(["file", file_path_str].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
                                Tag::parse(["version", &package_version].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
                            ];
                            let event_builder = EventBuilder::text_note(content).tags(tags);
                            if let Some(event_id) = publish_nostr_event_if_release(&mut client, file_hash_hex, keys.clone(), event_builder, &mut relay_urls, file_path_str, &output_dir, &mut total_bytes_sent).await {
                                published_event_ids.push(Tag::event(event_id));
                            }
                            // Publish the accompanying metadata (NIP-01 kind 0) event.
                            get_file_hash_core::publish_metadata_event(
                                &keys,
                                &relay_urls,
                                DEFAULT_PICTURE_URL,
                                DEFAULT_BANNER_URL,
                                file_path_str,
                            ).await;
                        }
                        Err(e) => {
                            println!("cargo:warning=Failed to derive Nostr secret key for {}: {}", file_path_str, e);
                        }
                    }
                }
                Err(e) => {
                    println!("cargo:warning=Failed to read file {}: {}", file_path_str, e);
                }
            }
        }
        // Build manifest: a single event carrying an `e` tag for every
        // per-file event published above.
        if !published_event_ids.is_empty() {
            // TODO: this will be either the default or detected from env vars PRIVATE_KEY
            let keys = Keys::new(SecretKey::from_hex(DEFAULT_GNOSTR_KEY).expect("Failed to create Nostr keys from DEFAULT_GNOSTR_KEY"));
            let cloned_keys = keys.clone();
            let content = format!("Build manifest for get_file_hash v{}", package_version);
            // A single build_manifest tag (was previously duplicated four
            // times by copy-paste).
            let mut tags = vec![
                Tag::parse(["build_manifest", &package_version].iter().map(ToString::to_string).collect::<Vec<String>>()).unwrap(),
            ];
            tags.extend(published_event_ids);
            let event_builder = EventBuilder::text_note(content.clone()).tags(tags);
            if let Some(event_id) = publish_nostr_event_if_release(
                &mut client,
                hex::encode(Sha256::digest(content.as_bytes())),
                keys,
                event_builder,
                &mut relay_urls,
                "build_manifest.json",
                &output_dir,
                &mut total_bytes_sent,
            ).await {
                // Publish metadata event for the build manifest.
                get_file_hash_core::publish_metadata_event(
                    &cloned_keys,
                    &relay_urls,
                    DEFAULT_PICTURE_URL,
                    DEFAULT_BANNER_URL,
                    &format!("build_manifest:{}", package_version),
                ).await;
                // These CARGO_PKG_* vars are set by cargo for build scripts.
                let repo_url = std::env::var("CARGO_PKG_REPOSITORY").unwrap();
                let repo_name = std::env::var("CARGO_PKG_NAME").unwrap();
                let repo_description = std::env::var("CARGO_PKG_DESCRIPTION").unwrap();
                // The announcement identity is derived from the manifest
                // event id, making it unique per published manifest.
                let announcement_keys = Keys::new(SecretKey::from_hex(event_id.to_hex().as_str()).expect("Failed to create Nostr keys from build_manifest_event_id"));
                let announcement_pubkey_hex = announcement_keys.public_key().to_string();
                // Publish the NIP-34 repository announcement; failures are
                // logged inside the helper, so the result can be ignored.
                let _ = get_repo_announcement_event(
                    &mut client,
                    &announcement_keys,
                    &relay_urls,
                    &repo_url,
                    &repo_name,
                    &repo_description,
                    &git_commit_hash,
                    &git_branch,
                    &output_dir,
                    &announcement_pubkey_hex,
                ).await;
            }
        }
        println!("cargo:warning=Total bytes sent to Nostr relays: {} bytes", total_bytes_sent);
    }
}
// deterministic nostr event build example
# `build.rs` Documentation
This document explains the functionality of the `build.rs` script in this project. The `build.rs` script is a special Rust file that, if present, Cargo will compile and run *before* compiling the rest of your package. It's typically used for tasks that need to be performed during the build process, such as generating code, setting environment variables, or performing conditional compilation.
## Core Functionality
The `build.rs` script in this project performs the following key functions:
1. **Environment Variable Injection:** It computes various project-related values at compile time and injects them via `cargo:rustc-env=VAR=VALUE` directives as environment variables that can be accessed by the main crate using `env!("VAR_NAME")`. This includes:
* `CARGO_PKG_NAME`: The name of the current package (from `Cargo.toml`).
* `CARGO_PKG_VERSION`: The version of the current package (from `Cargo.toml`).
* `GIT_COMMIT_HASH`: The full commit hash of the current Git HEAD (if in a Git repository).
* `GIT_BRANCH`: The name of the current Git branch (if in a Git repository).
* `CARGO_TOML_HASH`: The SHA-256 hash of the `Cargo.toml` file.
* `LIB_HASH`: The SHA-256 hash of the `src/lib.rs` file.
* `BUILD_HASH`: The SHA-256 hash of the `build.rs` file itself.
2. **Rerun Conditions:** It tells Cargo when to re-run the build script. This ensures that the injected environment variables and any conditional compilation logic are up-to-date if relevant files change:
* `Cargo.toml`
* `src/lib.rs`
* `build.rs`
* `.git/HEAD` (to detect changes in the Git repository like new commits or branch switches).
* `src/get_file_hash_core/src/online_relays_gps.csv` (conditionally, if the file exists).
3. **Conditional Nostr Event Publishing (Release Builds with `nostr` feature):**
If the project is being compiled in **release mode (`--release`)** and the **`nostr` feature is enabled (`--features nostr`)**, the `build.rs` script will connect to Nostr relays and publish events. This is intended for "deterministic Nostr event build examples" as indicated by the comments in the file.
* **Relay Management:** It retrieves a list of default relay URLs. During event publishing, it identifies and removes "unfriendly" or unresponsive relays (e.g., those with timeout, connection issues, or spam blocks) from the list for subsequent publications.
* **File Hashing and Key Generation:** For each Git-tracked file (when in a Git repository), it computes its SHA-256 hash. This hash is then used to derive a Nostr `SecretKey`.
* **Event Creation:**
* **Individual File Events:** For each Git-tracked file, a Nostr `text_note` event is created whose content is the file itself. This event includes tags for:
* `file`: The path of the file.
* `version`: The package version.
* (The Git commit hash and branch name are not tagged on per-file events; they are passed to the NIP-34 repository announcement instead.)
* **Metadata Event:** It publishes a metadata event using `get_file_hash_core::publish_metadata_event`.
* **Linking Event (Build Manifest):** After processing all individual files, if any events were published, a final "build manifest" `text_note` event is created. This event links to all the individual file events that were published during the build using event tags.
* **Output Storage:** The JSON representation of successfully published Nostr events is saved under `.gnostr/build/{package_version}/` (relative to the directory the build runs in); the deeper sub-path layout (sanitized file path, hash, public key, event id) is determined by the publishing helper. This provides a local record of what was published.
### `publish_nostr_event_if_release` Function
This asynchronous helper function is responsible for:
* Adding relays to the Nostr client.
* Connecting to relays.
* Signing the provided `EventBuilder` to create an `Event`.
* Sending the event to the configured relays.
* Logging success or failure for each relay.
* Identifying and removing unresponsive relays from the `relay_urls` list.
* Saving the published event's JSON to the local filesystem.
### `should_remove_relay` Function
This helper function determines if a relay should be considered "unfriendly" or unresponsive based on common error messages received during Nostr event publication.
## Usage
To prevent 'Too many open files' errors, especially during builds and tests involving numerous file operations or subprocesses (like `git ls-files` or parallel test execution), it may be necessary to increase the file descriptor limit.
* **For local development**: Run `ulimit -n 4096` in your terminal session before executing `cargo build` or `cargo test`. This setting is session-specific.
* **For CI environments**: The `.github/workflows/rust.yml` workflow is configured to set `ulimit -n 4096` for relevant test steps to ensure consistent execution.
The values set by `build.rs` can be accessed in your Rust code (e.g., `src/lib.rs`) at compile time using the `env!` macro. For example:
```rust
pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
```
The Nostr event publishing functionality of `build.rs` is primarily for release builds with the `nostr` feature enabled, allowing for the automatic, deterministic publication of project state to the Nostr network as part of the CI/CD pipeline.
## Example Commands
To interact with the `build.rs` script's features, especially those related to Nostr event publishing, you can use the following `cargo` commands:
* **Build in release mode with Nostr feature (verbose output):**
```bash
cargo build --release --workspace --features nostr -vv
```
* **Run tests for `get_file_hash_core` sequentially with Nostr feature and verbose logging (as in CI):**
```bash
RUST_LOG=info,nostr_sdk=debug,frost=debug cargo test -p get_file_hash_core --features nostr -- --test-threads 1 --nocapture
```
* **Run all workspace tests in release mode with Nostr feature:**
```bash
cargo test --workspace --release --features nostr
```
* **Build `get_file_hash_core` in release mode with Nostr feature (very verbose output):**
```bash
cargo build --release --features nostr -vv -p get_file_hash_core
```
* **Run `get_file_hash_core` tests in release mode with Nostr feature (very verbose output):**
```bash
cargo test --release --features nostr -vv -p get_file_hash_core
```