--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/components/desktop/xscreensaver/patches/24-bug-15772119.patch Sun Jan 31 19:31:13 2016 -0800
@@ -0,0 +1,642 @@
+Bug 15772119 - SUNBT7144354 xscreensaver 5.15 fails to load image files
+
+ xscreensaver-getimage-file fails to load image files because the
+ LWP::Simple Perl module is missing from Solaris. For the time being I
+ have reverted xscreensaver-getimage-file to the older version 1.27,
+ which does not use LWP. RFE 15772127 has already been filed to add the LWP Perl modules to Solaris.
+
+Fixed upstream in a different form in a later release.
+
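+For the record, here is a quick way to confirm whether the module is
+available (a minimal sketch, assuming the stock Solaris perl at
+/usr/perl5/bin/perl, the same interpreter the reverted script uses):
+
+    # Exits 0 and prints "LWP ok" if LWP::Simple is installed; otherwise
+    # perl dies with "Can't locate LWP/Simple.pm in @INC ...".
+    /usr/perl5/bin/perl -MLWP::Simple -e 'print "LWP ok\n"'
+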
+---
+ driver/xscreensaver-getimage-file | 428 ++++---------------------------------
+ 1 files changed, 45 insertions(+), 383 deletions(-)
+
+diff --git a/driver/xscreensaver-getimage-file b/driver/xscreensaver-getimage-file
+--- a/driver/xscreensaver-getimage-file
++++ b/driver/xscreensaver-getimage-file
+@@ -1,5 +1,5 @@
+-#!/usr/bin/perl -w
+-# Copyright © 2001-2011 Jamie Zawinski <jwz@jwz.org>.
++#!/usr/perl5/bin/perl -w
++# Copyright © 2001-2009 Jamie Zawinski <jwz@jwz.org>.
+ #
+ # Permission to use, copy, modify, distribute, and sell this software and its
+ # documentation for any purpose is hereby granted without fee, provided that
+@@ -13,18 +13,12 @@
+ # prints its name. The file will be an image file whose dimensions are
+ # larger than a certain minimum size.
+ #
+-# If the directory is a URL, it is assumed to be an RSS or Atom feed.
+-# The images from that feed will be downloaded, cached, and selected from
+-# at random. The feed will be re-polled periodically, as needed.
+-#
+ # The various xscreensaver hacks that manipulate images ("jigsaw", etc.) get
+ # the image to manipulate by running the "xscreensaver-getimage" program.
+ #
+ # Under X11, the "xscreensaver-getimage" program invokes this script,
+ # depending on the value of the "chooseRandomImages" and "imageDirectory"
+ # settings in the ~/.xscreensaver file (or .../app-defaults/XScreenSaver).
+-# The screen savers invoke "xscreensaver-getimage" via utils/grabclient.c,
+-# which then invokes this script.
+ #
+ # Under Cocoa, this script lives inside the .saver bundle, and is invoked
+ # directly from utils/grabclient.c.
+@@ -49,12 +43,8 @@ use bytes; # Larry can take Unicode and shove it up his ass sideways.
+ # Perl 5.8.0 causes us to start getting incomprehensible
+ # errors about UTF-8 all over the place without this.
+
+-use Digest::MD5 qw(md5_base64);
+-use LWP::Simple qw($ua);
+-
+-
+ my $progname = $0; $progname =~ s@.*/@@g;
+-my $version = q{ $Revision: 1.30 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
++my $version = q{ $Revision: 1.27 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
+
+ my $verbose = 0;
+
+@@ -74,10 +64,6 @@ my $cache_p = 1;
+ #
+ my $cache_max_age = 60 * 60 * 3; # 3 hours
+
+-# Re-poll RSS/Atom feeds when local copy is older than this many seconds.
+-#
+-my $feed_max_age = $cache_max_age;
+-
+
+ # This matches files that we are allowed to use as images (case-insensitive.)
+ # Anything not matching this is ignored. This is so you can point your
+@@ -118,19 +104,18 @@ my $stat_count = 0; # number of files/dirs stat'ed
+ my $skip_count_unstat = 0; # number of files skipped without stat'ing
+ my $skip_count_stat = 0; # number of files skipped after stat
+
+-sub find_all_files($);
+-sub find_all_files($) {
++sub find_all_files {
+ my ($dir) = @_;
+
+ print STDERR "$progname: + reading dir $dir/...\n" if ($verbose > 1);
+
+- my $dd;
+- if (! opendir ($dd, $dir)) {
++ local *DIR;
++ if (! opendir (DIR, $dir)) {
+ print STDERR "$progname: couldn't open $dir: $!\n" if ($verbose);
+ return;
+ }
+- my @files = readdir ($dd);
+- closedir ($dd);
++ my @files = readdir (DIR);
++ closedir (DIR);
+
+ my @dirs = ();
+
+@@ -205,7 +190,7 @@ sub find_all_files($) {
+ }
+
+
+-sub spotlight_all_files($) {
++sub spotlight_all_files {
+ my ($dir) = @_;
+
+ my @terms = ();
+@@ -235,7 +220,7 @@ sub spotlight_all_files($) {
+ # running at once, one will wait for the other, instead of both of
+ # them spanking the same file system at the same time.
+ #
+-my $cache_fd = undef;
++local *CACHE_FILE;
+ my $cache_file_name = undef;
+ my $read_cache_p = 0;
+
+@@ -257,18 +242,18 @@ sub read_cache($) {
+ if ($verbose > 1);
+
+ my $file = $cache_file_name;
+- open ($cache_fd, '+>>', $file) || error ("unable to write $file: $!");
+- flock ($cache_fd, LOCK_EX) || error ("unable to lock $file: $!");
+- seek ($cache_fd, 0, 0) || error ("unable to rewind $file: $!");
++ open (CACHE_FILE, "+>>$file") || error ("unable to write $file: $!");
++ flock (CACHE_FILE, LOCK_EX) || error ("unable to lock $file: $!");
++ seek (CACHE_FILE, 0, 0) || error ("unable to rewind $file: $!");
+
+- my $mtime = (stat($cache_fd))[9];
++ my $mtime = (stat(CACHE_FILE))[9];
+
+ if ($mtime + $cache_max_age < time) {
+ print STDERR "$progname: cache is too old\n" if ($verbose);
+ return ();
+ }
+
+- my $odir = <$cache_fd>;
++ my $odir = <CACHE_FILE>;
+ $odir =~ s/[\r\n]+$//s if defined ($odir);
+ if (!defined ($odir) || ($dir ne $odir)) {
+ print STDERR "$progname: cache is for $odir, not $dir\n"
+@@ -277,7 +262,7 @@ sub read_cache($) {
+ }
+
+ my @files = ();
+- while (<$cache_fd>) {
++ while (<CACHE_FILE>) {
+ s/[\r\n]+$//s;
+ push @files, "$odir/$_";
+ }
+@@ -300,17 +285,18 @@ sub write_cache($) {
+
+ if (! $read_cache_p) {
+
+- truncate ($cache_fd, 0) ||
++ truncate (CACHE_FILE, 0) ||
+ error ("unable to truncate $cache_file_name: $!");
+- seek ($cache_fd, 0, 0) ||
++ seek (CACHE_FILE, 0, 0) ||
+ error ("unable to rewind $cache_file_name: $!");
+
+ if ($#all_files >= 0) {
+- print $cache_fd "$dir\n";
++ print CACHE_FILE "$dir\n";
++ my $re = qr/$dir/;
+ foreach (@all_files) {
+ my $f = $_; # stupid Perl. do this to avoid modifying @all_files!
+- $f =~ s@^\Q$dir\L/@@so || die; # remove $dir from front
+- print $cache_fd "$f\n";
++ $f =~ s@^$re/@@so || die;
++ print CACHE_FILE "$f\n";
+ }
+ }
+
+@@ -318,319 +304,17 @@ sub write_cache($) {
+ if ($verbose);
+ }
+
+- flock ($cache_fd, LOCK_UN) ||
++ flock (CACHE_FILE, LOCK_UN) ||
+ error ("unable to unlock $cache_file_name: $!");
+- close ($cache_fd);
+- $cache_fd = undef;
+-}
+-
+-
+-# Returns a list of the image enclosures in the RSS or Atom feed.
+-# Elements of the list are references, [ "url", "guid" ].
+-#
+-sub parse_feed($) {
+- my ($url) = @_;
+-
+- $ua->agent ("$progname/$version");
+- $ua->timeout (10); # bail sooner than the default of 3 minutes
+-
+- my $body = (LWP::Simple::get($url) || '');
+-
+- error ("not an RSS or Atom feed: $url")
+- unless ($body =~ m@^<\?xml\s@si);
+-
+- $body =~ s@(<ENTRY|<ITEM)@\001$1@gsi;
+- my @items = split(/\001/, $body);
+- shift @items;
+-
+- my @imgs = ();
+- my %ids;
+-
+- foreach my $item (@items) {
+- my $iurl = undef;
+- my $id = undef;
+-
+- # First look for <link rel="enclosure" href="...">
+- #
+- if (! $iurl) {
+- $item =~ s!(<LINK[^<>]*>)!{
+- my $link = $1;
+- my ($rel) = ($link =~ m/\bREL\s*=\s*[\"\']?([^<>\'\"]+)/si);
+- my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+- my ($href) = ($link =~ m/\bHREF\s*=\s*[\"\']([^<>\'\"]+)/si);
+-
+- if ($rel && lc($rel) eq 'enclosure') {
+- if ($type) {
+- $href = undef unless ($type =~ m@^image/@si); # omit videos
+- }
+- $iurl = $href if ($href);
+- }
+- $link;
+- }!gsexi;
+- }
+-
+- # Then look for <media:content url="...">
+- #
+- if (! $iurl) {
+- $item =~ s!(<MEDIA:CONTENT[^<>]*>)!{
+- my $link = $1;
+- my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+- $iurl = $href if $href;
+- $link;
+- }!gsexi;
+- }
+-
+- # Then look for <description>... with an <img href="..."> inside.
+- #
+- if (! $iurl) {
+- $item =~ s!(<description[^<>]*>.*?</description>)!{
+- my $desc = $1;
+-        $desc =~ s/&lt;/</gs;
+-        $desc =~ s/&gt;/>/gs;
+-        $desc =~ s/&quot;/\"/gs;
+-        $desc =~ s/&apos;/\'/gs;
+-        $desc =~ s/&amp;/&/gs;
+- my ($href) = ($desc =~ m@<IMG[^<>]*\bSRC=[\"\']?([^\"\'<>]+)@si);
+- $iurl = $href if ($href);
+- $desc;
+- }!gsexi;
+- }
+-
+- # Could also do <content:encoded>, but the above probably covers all
+- # of the real-world possibilities.
+-
+-
+- # Find a unique ID for this image, to defeat image farms.
+- # First look for <id>...</id>
+- ($id) = ($item =~ m!<ID\b[^<>]*>\s*([^<>]+?)\s*</ID>!si) unless $id;
+-
+- # Then look for <guid isPermaLink=...> ... </guid>
+- ($id) = ($item =~ m!<GUID\b[^<>]*>\s*([^<>]+?)\s*</GUID>!si) unless $id;
+-
+- # Then look for <link> ... </link>
+- ($id) = ($item =~ m!<LINK\b[^<>]*>\s*([^<>]+?)\s*</LINK>!si) unless $id;
+-
+-
+- if ($iurl) {
+- $id = $iurl unless $id;
+- my $o = $ids{$id};
+- if (! $o) {
+- $ids{$id} = $iurl;
+- my @P = ($iurl, $id);
+- push @imgs, \@P;
+- } elsif ($iurl ne $o) {
+- print STDERR "$progname: WARNING: dup ID \"$id\"" .
+- " for \"$o\" and \"$iurl\"\n";
+- }
+- }
+- }
+-
+- return @imgs;
+-}
+-
+-
+-# Like md5_base64 but uses filename-safe characters.
+-#
+-sub md5_file($) {
+- my ($s) = @_;
+- $s = md5_base64($s);
+- $s =~ s@[/]@_@gs;
+- $s =~ s@[+]@-@gs;
+- return $s;
+-}
+-
+-
+-# Given the URL of an image, download it into the given directory
+-# and return the file name.
+-#
+-sub download_image($$$) {
+- my ($url, $uid, $dir) = @_;
+-
+- my ($ext) = ($url =~ m@\.([a-z\d]+)$@si);
+- my $file = md5_file ($uid);
+- $file .= '.' . lc($ext) if $ext;
+-
+- # Don't bother doing If-Modified-Since to see if the URL has changed.
+- # If we have already downloaded it, assume it's good.
+- if (-f "$dir/$file") {
+- print STDERR "$progname: exists: $dir/$file for $uid / $url\n"
+- if ($verbose > 1);
+- return $file;
+- }
+-
+- # Special-case kludge for Flickr:
+- # Their RSS feeds sometimes include only the small versions of the images.
+- # So if the URL ends in "s" (75x75), "t" (100x100) or "m" (240x240),then
+- # munge it to be "b" (1024x1024).
+- #
+- $url =~ s@_[stm](\.[a-z]+)$@_b$1@si
+- if ($url =~ m@^https?://[^/?#&]*?flickr\.com/@si);
+-
+- print STDERR "$progname: downloading: $dir/$file for $uid / $url\n"
+- if ($verbose > 1);
+- $ua->agent ("$progname/$version");
+- my $status = LWP::Simple::mirror ($url, "$dir/$file");
+- if (!LWP::Simple::is_success ($status)) {
+- print STDERR "$progname: error $status: $url\n"; # keep going
+- }
+-
+- return $file;
+-}
+-
+-
+-sub mirror_feed($) {
+- my ($url) = @_;
+-
+- if ($url !~ m/^https?:/si) { # not a URL: local directory.
+- return (undef, $url);
+- }
+-
+- my $dir = "$ENV{HOME}/Library/Caches"; # MacOS location
+- if (-d $dir) {
+- $dir = "$dir/org.jwz.xscreensaver.feeds";
+- } elsif (-d "$ENV{HOME}/tmp") {
+- $dir = "$ENV{HOME}/tmp/.xscreensaver-feeds";
+- } else {
+- $dir = "$ENV{HOME}/.xscreensaver-feeds";
+- }
+-
+- if (! -d $dir) {
+- mkdir ($dir) || error ("mkdir $dir: $!");
+- print STDERR "$progname: mkdir $dir/\n" if ($verbose);
+- }
+-
+- # MD5 for directory name to use for cache of a feed URL.
+- $dir .= '/' . md5_file ($url);
+-
+- if (! -d $dir) {
+- mkdir ($dir) || error ("mkdir $dir: $!");
+- print STDERR "$progname: mkdir $dir/ for $url\n" if ($verbose);
+- }
+-
+- # At this point, we have the directory corresponding to this URL.
+- # Now check to see if the files in it are up to date, and download
+- # them if not.
+-
+- my $stamp = '.timestamp';
+- my $lock = "$dir/$stamp";
+-
+- print STDERR "$progname: awaiting lock: $lock\n"
+- if ($verbose > 1);
+-
+- my $mtime = ((stat($lock))[9]) || 0;
+-
+- my $lock_fd;
+- open ($lock_fd, '+>>', $lock) || error ("unable to write $lock: $!");
+- flock ($lock_fd, LOCK_EX) || error ("unable to lock $lock: $!");
+- seek ($lock_fd, 0, 0) || error ("unable to rewind $lock: $!");
+-
+- my $poll_p = ($mtime + $feed_max_age < time);
+-
+- $poll_p = 1 unless ($cache_p); # poll again now with --no-cache cmd line arg.
+-
+- # Even if the cache is young, let's make sure there are at least
+- # a few files in it, and re-check if not.
+- #
+- if (! $poll_p) {
+- my $count = 0;
+- opendir (my $dirh, $dir) || error ("$dir: $!");
+- foreach my $f (readdir ($dirh)) {
+- next if ($f =~ m/^\./s);
+- $count++;
+- last;
+- }
+- closedir $dirh;
+-
+- if ($count <= 0) {
+- print STDERR "$progname: no files in cache of $url\n" if ($verbose);
+- $poll_p = 1;
+- }
+- }
+-
+- if ($poll_p) {
+-
+- print STDERR "$progname: loading $url\n" if ($verbose);
+-
+- my %files;
+- opendir (my $dirh, $dir) || error ("$dir: $!");
+- foreach my $f (readdir ($dirh)) {
+- next if ($f eq '.' || $f eq '..');
+- $files{$f} = 0; # 0 means "file exists, should be deleted"
+- }
+- closedir $dirh;
+-
+- $files{$stamp} = 1;
+-
+- # Download each image currently in the feed.
+- #
+- my $count = 0;
+- my @urls = parse_feed ($url);
+- foreach my $p (@urls) {
+- my ($furl, $id) = @$p;
+- my $f = download_image ($furl, $id, $dir);
+- next unless $f;
+- $files{$f} = 1; # Got it, don't delete
+- $count++;
+- }
+-
+- print STDERR "$progname: empty feed: $url\n" if ($count <= 0);
+-
+- # Now delete any files that are no longer in the feed.
+- # But if there was nothing in the feed (network failure?)
+- # then don't blow away the old files.
+- #
+- my $kept = 0;
+- foreach my $f (keys(%files)) {
+- if ($count <= 0) {
+- $kept++;
+- } elsif ($files{$f}) {
+- $kept++;
+- } else {
+- if (unlink ("$dir/$f")) {
+- print STDERR "$progname: rm $dir/$f\n" if ($verbose > 1);
+- } else {
+- print STDERR "$progname: rm $dir/$f: $!\n"; # don't bail
+- }
+- }
+- }
+-
+- # Both feed and cache are empty. No files at all.
+- error ("empty feed: $url") if ($kept <= 1);
+-
+- $mtime = time(); # update the timestamp
+-
+- } else {
+-
+- # Not yet time to re-check the URL.
+- print STDERR "$progname: using cache: $url\n" if ($verbose);
+-
+- }
+-
+- # Unlock and update the write date on the .timestamp file.
+- #
+- truncate ($lock_fd, 0) || error ("unable to truncate $lock: $!");
+- seek ($lock_fd, 0, 0) || error ("unable to rewind $lock: $!");
+- utime ($mtime, $mtime, $lock_fd) || error ("unable to touch $lock: $!");
+- flock ($lock_fd, LOCK_UN) || error ("unable to unlock $lock: $!");
+- close ($lock_fd);
+- $lock_fd = undef;
+- print STDERR "$progname: unlocked $lock\n" if ($verbose > 1);
+-
+- # Don't bother using the imageDirectory cache. We know that this directory
+- # is flat, and we can assume that an RSS feed doesn't contain 100,000 images
+- # like ~/Pictures/ might.
+- #
+- $cache_p = 0;
+-
+- # Return the URL and directory name of the files of that URL's local cache.
+- #
+- return ($url, $dir);
++ close (CACHE_FILE);
+ }
+
+
+ sub find_random_file($) {
+ my ($dir) = @_;
+
++ $dir =~ s@/+$@@g;
++
+ if ($use_spotlight_p == -1) {
+ $use_spotlight_p = 0;
+ if (-x '/usr/bin/mdfind') {
+@@ -638,14 +322,6 @@ sub find_random_file($) {
+ }
+ }
+
+- my $url;
+- ($url, $dir) = mirror_feed ($dir);
+-
+- if ($url) {
+- $use_spotlight_p = 0;
+- print STDERR "$progname: $dir is cache for $url\n" if ($verbose > 1);
+- }
+-
+ @all_files = read_cache ($dir);
+
+ if ($#all_files >= 0) {
+@@ -673,7 +349,7 @@ sub find_random_file($) {
+
+ write_cache ($dir);
+
+-# @all_files = sort(@all_files);
++ @all_files = sort(@all_files);
+
+ if ($#all_files < 0) {
+ print STDERR "$progname: no files in $dir\n";
+@@ -686,9 +362,6 @@ sub find_random_file($) {
+ my $n = int (rand ($#all_files + 1));
+ my $file = $all_files[$n];
+ if (large_enough_p ($file)) {
+- if (! $url) {
+- $file =~ s@^\Q$dir\L/@@so || die; # remove $dir from front
+- }
+ return $file;
+ }
+ }
+@@ -699,7 +372,7 @@ sub find_random_file($) {
+ }
+
+
+-sub large_enough_p($) {
++sub large_enough_p {
+ my ($file) = @_;
+
+ my ($w, $h) = image_file_size ($file);
+@@ -726,7 +399,7 @@ sub large_enough_p($) {
+
+ # Given the raw body of a GIF document, returns the dimensions of the image.
+ #
+-sub gif_size($) {
++sub gif_size {
+ my ($body) = @_;
+ my $type = substr($body, 0, 6);
+ my $s;
+@@ -738,7 +411,7 @@ sub gif_size($) {
+
+ # Given the raw body of a JPEG document, returns the dimensions of the image.
+ #
+-sub jpeg_size($) {
++sub jpeg_size {
+ my ($body) = @_;
+ my $i = 0;
+ my $L = length($body);
+@@ -789,7 +462,7 @@ sub jpeg_size($) {
+
+ # Given the raw body of a PNG document, returns the dimensions of the image.
+ #
+-sub png_size($) {
++sub png_size {
+ my ($body) = @_;
+ return () unless ($body =~ m/^\211PNG\r/s);
+ my ($bits) = ($body =~ m/^.{12}(.{12})/s);
+@@ -803,7 +476,7 @@ sub png_size($) {
+ # Given the raw body of a GIF, JPEG, or PNG document, returns the dimensions
+ # of the image.
+ #
+-sub image_size($) {
++sub image_size {
+ my ($body) = @_;
+ return () if (length($body) < 10);
+ my ($w, $h) = gif_size ($body);
+@@ -816,17 +489,17 @@ sub image_size($) {
+
+ # Returns the dimensions of the image file.
+ #
+-sub image_file_size($) {
++sub image_file_size {
+ my ($file) = @_;
+- my $in;
+- if (! open ($in, '<', $file)) {
++ local *IN;
++ if (! open (IN, "<$file")) {
+ print STDERR "$progname: $file: $!\n" if ($verbose);
+ return undef;
+ }
+- binmode ($in); # Larry can take Unicode and shove it up his ass sideways.
++ binmode (IN); # Larry can take Unicode and shove it up his ass sideways.
+ my $body = '';
+- sysread ($in, $body, 1024 * 50); # The first 50k should be enough.
+- close $in; # (It's not for certain huge jpegs...
++ sysread (IN, $body, 1024 * 50); # The first 50k should be enough.
++ close IN; # (It's not for certain huge jpegs...
+ return image_size ($body); # but we know they're huge!)
+ }
+
+@@ -837,19 +510,15 @@ sub error($) {
+ exit 1;
+ }
+
+-sub usage() {
++sub usage {
+ print STDERR "usage: $progname [--verbose] directory\n" .
+ " Prints the name of a randomly-selected image file. The directory\n" .
+ " is searched recursively. Images smaller than " .
+- "${min_image_width}x${min_image_height} are excluded.\n" .
+- "\n" .
+- " The directory may also be the URL of an RSS/Atom feed. Enclosed\n" .
+- " images will be downloaded cached locally.\n" .
+- "\n";
++ "${min_image_width}x${min_image_height} are excluded.\n";
+ exit 1;
+ }
+
+-sub main() {
++sub main {
+ my $dir = undef;
+
+ while ($_ = $ARGV[0]) {
+@@ -868,18 +537,11 @@ sub main() {
+
+ usage unless (defined($dir));
+
+- $dir =~ s@^feed:@http:@si;
+-
+- if ($dir =~ m/^https?:/si) {
+- # ok
+- } else {
+- $dir =~ s@^~/@$ENV{HOME}/@s; # allow literal "~/"
+- $dir =~ s@/+$@@s; # omit trailing /
++ $dir =~ s@^~/@$ENV{HOME}/@s; # allow literal "~/"
+
+- if (! -d $dir) {
+- print STDERR "$progname: $dir: not a directory or URL\n";
+- usage;
+- }
++ if (! -d $dir) {
++ print STDERR "$progname: $dir: not a directory\n";
++ usage;
+ }
+
+ my $file = find_random_file ($dir);
+