Skip to content

Instantly share code, notes, and snippets.

View msminhas93's full-sized avatar

Manpreet Singh msminhas93

View GitHub Profile
// NOTE(review): the lines in this excerpt are in scrambled display order —
// the loop body appears before the `let` bindings it consumes, and the
// enclosing function signature appears last. This looks like a blog/gist
// rendering artifact; logical reading order is: `fetch_hacker_news` builds
// `url` -> fetches `response` -> parses `document` -> selects rows -> loop.
// For each story row, extract the headline text and its href.
for row in document.select(&row_selector) {
// Anchor inside the title cell; `?` propagates a selector-parse error.
// NOTE(review): parsing this selector inside the loop re-does the work per
// row — presumably hoistable; confirm against the original source.
let title_selector = Selector::parse("td.title > span.titleline > a")?;
if let Some(title_element) = row.select(&title_selector).next() {
let title = title_element.inner_html();
// A missing href degrades to an empty string rather than skipping the row.
let link = title_element.value().attr("href").unwrap_or("").to_string();
// `submissions` is declared outside this excerpt.
submissions.push(Submission { title, link });
}
}
// Parse the fetched HTML body into a queryable DOM.
let document = Html::parse_document(&response);
// Each Hacker News story row is a <tr class="athing">.
let row_selector = Selector::parse("tr.athing")?;
// NOTE(review): the URL is wrapped in literal angle brackets (`<...>`) —
// almost certainly a Markdown autolink artifact from the blog conversion; as
// written the formatted URL would be invalid. Confirm against the original.
let url = format!("<https://news.ycombinator.com/news?p={}>", page);
// Blocking GET; `?` propagates both request and body-read failures.
let response = get(&url)?.text()?;
// Fetches and scrapes one page of Hacker News submissions.
// Errors from the HTTP client and selector parsing are boxed and propagated.
pub fn fetch_hacker_news(page: usize) -> Result<Page, Box<dyn Error>> {
// ... (implementation details)
}
@msminhas93
msminhas93 / gist_script.txt
Last active September 2, 2024 00:35
rust cli blog post scripts
/// A single Hacker News story scraped from a listing page.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Submission {
    /// Headline text as rendered in the story row.
    pub title: String,
    /// Target href of the story; may be empty when the anchor had no
    /// `href` attribute (the scraper falls back to `""`).
    pub link: String,
}

/// One scraped page of Hacker News results plus pagination state.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Page {
    /// Stories found on this page, in display order.
    pub submissions: Vec<Submission>,
    /// 1-based index of the page that was fetched.
    pub current_page: usize,
    /// Total number of pages available for navigation.
    pub total_pages: usize,
}
@msminhas93
msminhas93 / gist_script.txt
Created September 2, 2024 00:23
A sample Gist created from Jupyter Notebook
# Demo gist cell: print a classic greeting.
greeting = "Hello, World!"
print(greeting)
We can make this file beautiful and searchable if this error is corrected: Unclosed quoted field in line 4.
Metric,Behavior Across Classifier Types,Impact of Class Imbalance,When to Use,Key Considerations
Accuracy,"- Random: Equal to 0.5. - All 1: Equals positive class proportion - All 0: Equals negative class proportion","- Highly sensitive; can be misleading in imbalanced datasets","- Use in balanced datasets or when all classes are equally important","- Can be deceptive in imbalanced datasets; consider other metrics for a more comprehensive evaluation"
Precision,"- Random: Macro average is equal to 0.5. - All 1: Class 0 precision is 0. - All 0: Class 1 precision 0.","- Cases where the number of actual positives is very, very low, say 1-2 examples in total, precision is less meaningful and less useful as a metric","- When the cost of false positives is high (e.g., spam detection, medical diagnosis)","- Should be balanced with recall; precision alone may not capture overall performance"
Recall,"- Random: Equal to 0.5 - All 1: Macro average 0.5 always. - All 0: Macro average always 0.5","- In an imbalanced dataset
# Iterate over all the samples in the current App view and report, per
# sample, the combined "wine" confidence vs. the "bagel" confidence derived
# from the raw prediction logits.
# NOTE(review): the pasted original lost its indentation (loop body at
# column 0) — structure restored here to the only reading that parses.
for sample in session.view:
    # Softmax over dim 0 turns the logits into a probability distribution.
    softmax_values = torch.softmax(torch.from_numpy(sample.predictions.logits), 0).numpy()
    # assumes red_wine_index / wine_glass_index / bagel_index are class-label
    # indices defined elsewhere in the script — TODO confirm against caller
    wine_confidence = softmax_values[red_wine_index] + softmax_values[wine_glass_index]
    bagel_confidence = softmax_values[bagel_index]
    print(wine_confidence, bagel_confidence)
# Iterate over only the samples currently selected in the App session
# (note the typo "datset" in the original comment: it means "dataset").
# NOTE(review): this excerpt appears truncated relative to the view loop —
# bagel_confidence and the print are missing; confirm against the full gist.
# NOTE(review): the pasted original lost its indentation (loop body at
# column 0) — structure restored here to the only reading that parses.
for sample in dataset[session.selected]:
    # Softmax over dim 0 turns the logits into a probability distribution.
    softmax_values = torch.softmax(torch.from_numpy(sample.predictions.logits), 0).numpy()
    # assumes red_wine_index / wine_glass_index are class-label indices
    # defined elsewhere in the script — TODO confirm against caller
    wine_confidence = softmax_values[red_wine_index] + softmax_values[wine_glass_index]
# Plot an interactive confusion matrix from the evaluation results and wire
# it into the running session.
# NOTE(review): `results` and `session` are defined elsewhere — presumably a
# FiftyOne evaluation-results object and App session; verify against caller.
cm = results.plot_confusion_matrix()
cm.show()
# Attach the plot to the session so it stays linked to its state.
session.plots.attach(cm)
# Print the text classification report to stdout.
results.print_report()