Merge branch 'master' of waschsauger:git/dirty-helpers

commit 86a4386cb3 (master)
neingeist, 9 years ago

@@ -0,0 +1,65 @@
#!/usr/bin/env ruby
# Find unused directories by looking at the atimes of the contained files.
#
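# Example (hypothetical script name and paths):
#
#   ./find-unused-dirs.rb --days 90 /srv/archive
#
# prints an "unused: <path>" line for every directory in which no
# contained file was accessed during the last 90 days.
#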
require 'optparse'
options = {}
options[:days] = 180
def used_recently?(path, days)
  if File.directory?(path)
    # A directory counts as used recently iff any contained file does.
    used_recently = false
    empty = true
    Dir.new(path).each do |entry|
      if entry != "." && entry != ".."
        empty = false
        if used_recently?("#{path}/#{entry}", days)
          used_recently = true
        end
      end
    end
    # Report only non-empty unused directories.
    if !used_recently && !empty
      puts "unused: #{path}"
    end
    return used_recently
  elsif File.file?(path) || File.socket?(path) || File.symlink?(path)
    return (File.lstat(path).atime >= Time.now - (days * 24 * 3600))
  else
    puts "Unknown file type: #{path}"
    exit 1
  end
end
# Parse options
OptionParser.new do |opts|
  opts.banner = "Usage: #{$0} [options] DIRECTORY..."
  opts.separator ""
  opts.separator "Find unused (sub-)directories, recursively traversing DIRECTORY, by looking at the atime(s) of all contained files. A directory is considered unused if ALL of the contained files went unaccessed for DAYS days (default is 180 days)."
  opts.separator ""
  opts.on("-d", "--days DAYS", Integer,
          "Days after which a file is considered unused") do |d|
    options[:days] = d
  end
end.parse!
dirs = ARGV
if dirs.empty?
  dirs = ["."]
end
dirs.each do |dir|
  used_recently?(dir, options[:days])
end

@@ -0,0 +1,8 @@
#!/bin/sh
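# Download all photos of a flickr set in large size: scrape the set page
# for photo ids, resolve each photo_zoom page, and wget the jpgs.
# Usage (hypothetical script name): ./flickr-set-dl.sh SET_URL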
seturl="$1"
lynx -dump "$seturl" \
  | perl -ne 'print "http://flickr.com/photo_zoom.gne?id=$1&size=l\n" if m#([0-9]+)/in/set-#' \
  | while read -r url; do
      lynx -source "$url" | grep "static.*flickr.com" \
        | perl -ne 'print "$1\n" if m#a href="(.*?\.jpg)"#' \
        | xargs wget -c
    done

@@ -0,0 +1,8 @@
#!/bin/sh
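# Same approach for a flickr photo listing (e.g. a tag page): scrape the
# photo ids, resolve each photo_zoom page, and wget the jpgs in large size.
# Usage (hypothetical script name): ./flickr-tag-dl.sh TAG_URL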
tagurl="$1"
lynx -dump "$tagurl" \
  | perl -ne 'print "http://flickr.com/photo_zoom.gne?id=$1&size=l\n" if m#flickr.com/photos/.*/([0-9]+)/#' \
  | while read -r url; do
      lynx -source "$url" | grep "static.*flickr.com" \
        | perl -ne 'print "$1\n" if m#a href="(.*?\.jpg)"#' \
        | xargs wget -c
    done

@@ -0,0 +1,30 @@
#!/bin/sh
# Block SSH connections from CN etc.
#
# This downloads a list of IP addresses from some website via unencrypted HTTP and then
# blocks this list of IP addresses without filtering. You should probably not use this
# script.
set -e
ports="ssh,websm" # comma-separated for iptables -m multiport
countries="cn hk" # space-separated
for country in $countries; do
  ipset -q -N geoblock-$country hash:net || true
  tmp_zone=$(mktemp)
  curl -s -o "$tmp_zone" http://www.ipdeny.com/ipblocks/data/aggregated/$country-aggregated.zone
  for ip in $(cat "$tmp_zone"); do
    ipset -A geoblock-$country "$ip" -exist
  done
  rm -f "$tmp_zone"
  rule_spec="-p tcp -m multiport --dports $ports \
    -m set --match-set geoblock-$country src -j REJECT"
  if ! iptables -C INPUT $rule_spec; then
    iptables -I INPUT $rule_spec
  fi
done
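# To inspect the result afterwards (a sketch; set names as created above):
#   ipset list geoblock-cn | head
#   iptables -S INPUT | grep geoblock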

@@ -0,0 +1,26 @@
#!/bin/bash
# join pdfs, inserting an empty (one-page) pdf after each pdf with an odd number of pages
# create an empty (one-page) pdf
emptypdf="$(mktemp -u /tmp/pdfjoin-aligned-XXXXXX).pdf"
echo -ne "0 0 moveto\n() show\n" | ps2pdf - "$emptypdf"
# go through the pdfs
declare -a pdfs
for pdf in "$@"; do
  pdfs+=("$pdf")
  # does it have an odd number of pages?
  pages=$(pdfinfo "$pdf" | awk '/^Pages:/ { print $2 }')
  if [ $((pages % 2)) = 1 ]; then
    pdfs+=("$emptypdf")
  fi
done
# join!
out="$(mktemp -u /tmp/joined-XXXXXX).pdf"
pdfjoin --outfile "$out" "${pdfs[@]}"
# cleanup
rm -f "$emptypdf"
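# Example (hypothetical file names):
#   ./pdfjoin-aligned.sh a.pdf b.pdf c.pdf
# The joined pdf ends up at the path stored in $out (under /tmp); every
# input then starts on an odd page, e.g. for duplex printing.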

@@ -0,0 +1,34 @@
#!/bin/bash
tmp=$(mktemp -d /tmp/pdfnup-slides-XXXXX)
[ -z "$tmp" ] && exit 1
# put 2x3 slides (pdf) on one page
#pdfnup --nup 2x3 --frame true \
#  --delta "0.5cm 3cm" --offset "0cm 1cm" \
#  --scale 0.85 \
#  "$@" \
#  --outfile "$tmp/1.pdf"
# put 2x2 slides (pdf) on one page
pdfnup --nup 2x2 --frame true \
  --delta "0.5cm 0.5cm" \
  --scale 0.9 \
  "$@" \
  --outfile "$tmp/1.pdf"
# scale up to a4 - now works for beamer slides, too.
# hackhackhack
cat > "$tmp/2.tex" <<EOF
\documentclass[a4paper,landscape]{article}
\usepackage{pdfpages}
\begin{document}
\includepdf[pages=-,nup=1x1,frame=false,trim=0 0 0 0,delta=0 0,offset=0 0,scale=1.0,turn=true,noautoscale=false,column=false,columnstrict=false,openright=false]{$tmp/1.pdf}
\end{document}
EOF
pdflatex -output-directory "$tmp" "$tmp/2.tex"
mv -v "$tmp/2.pdf" "$1.2x2.pdf"
# cleanup
rm -f "$tmp"/*.pdf "$tmp"/*.tex "$tmp"/*.aux "$tmp"/*.log
rmdir "$tmp"
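# Example (hypothetical file name):
#   ./pdfnup-slides.sh talk.pdf
# leaves the 2x2 handout next to the input as talk.pdf.2x2.pdf.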

@@ -0,0 +1,72 @@
#!/usr/bin/env ruby
#
# Copyright (c) 2009, neingeist <neingeist@bl0rg.net>
# Copyright (c) 2005, Joe Mason <joe@notcharles.ca>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Usage: rss-title-filter <list of titles to include>
#
# Skips all items which do NOT include the given titles.
#
# Example: to include all entries with the title "meme", "quiz" or
# "picture post", do:
#
# rss-title-filter meme quiz "picture post"
#
# Requires the Ruby RSS module, which is included in Ruby 1.8.3 or
# higher and available for older versions of Ruby at
# http://raa.ruby-lang.org/project/rss/.
#
# based on "rss-tag-filter" by Joe Mason.
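#
# The filter reads the feed on stdin and writes the filtered feed to
# stdout, so it can sit in a pipeline (hypothetical feed URL):
#
#   curl -s http://example.com/feed.rss | rss-title-filter meme quiz
#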
require 'rss/2.0'
# if no titles were given, no parsing needed
if ARGV.empty? then
  STDIN.each do |line|
    STDOUT.puts(line)
  end
  exit(0)
end
# parse the RSS
rss_source = STDIN.read
begin
  rss = RSS::Parser.parse(rss_source)
rescue RSS::InvalidRSSError
  rss = RSS::Parser.parse(rss_source, false) # retry without validation
end
# filter out all items NOT in the cmd-line args
rss.channel.items.reject! do |item|
  found = false
  ARGV.each do |arg|
    if Regexp.new(arg, Regexp::IGNORECASE).match(item.title)
      found = true
    end
  end
  !found
end
# write the modified rss
STDOUT.puts(rss)

@@ -0,0 +1,36 @@
#!/usr/bin/perl
# RSS feed for ruthe.de (liferea feed source "command")
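# Usage sketch: configure this script as a "command" feed source in
# liferea, or run it by hand (hypothetical name): ./ruthe-rss.pl > ruthe.rss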
use strict;
use warnings;
use LWP::Simple;
use XML::RSS;
use DateTime;
use DateTime::Format::W3CDTF;
my $f = DateTime::Format::W3CDTF->new();
my $rss = XML::RSS->new(version => '1.0');
$rss->channel(
  title       => "Ruthe.de",
  link        => "http://www.ruthe.de",
  description => "Cartoons",
);
$_ = get("http://ruthe.de/frontend/archiv.php");
while (m#<a href="index.php\?pic=([0-9]+)&sort=datum&order=DESC"><img src="cartoons/tn_strip_([0-9]+).jpg"#sg) {
  my ($id, $picid) = ($1, $2);
  my $picurl = "http://ruthe.de/frontend/cartoons/strip_$picid.jpg";
  my $pagurl = "http://ruthe.de/frontend/index.php?pic=$id&sort=datum&order=DESC";
  my (undef, undef, $modified_time, undef, undef) = head($picurl);
  my $date = $f->format_datetime(DateTime->from_epoch(epoch => $modified_time));
  $rss->add_item(
    title       => "Comic Nr. $picid",
    link        => $pagurl,
    description => "<a href=\"$pagurl\"> <img src=\"$picurl\" /> </a>",
    dc          => { date => $date },
  );
}
print $rss->as_string;