if (!defined('ABSPATH')) die('No direct access.');

/**
 * Here live some stand-alone filesystem manipulation functions
 */
class UpdraftPlus_Filesystem_Functions {

	/**
	 * Compute the combined size of one or more directories (or individual files).
	 *
	 * If $basedirs is passed as an array, then $directorieses must be too.
	 * Note: the name $directorieses is used because $directories is used within the foreach-within-a-foreach further down.
	 *
	 * @param Array|String $directorieses List of directories, or a single one
	 * @param Array        $exclude       An exclusion array of directories
	 * @param Array|String $basedirs      A list of base directories, or a single one
	 * @param String       $format        Return format - 'text' or 'numeric'
	 *
	 * @return String|Integer - the total size; human-readable text, or a raw byte count if $format is 'numeric'
	 */
	public static function recursive_directory_size($directorieses, $exclude = array(), $basedirs = '', $format = 'text') {

		$size = 0;

		// A single path string serves as its own base directory
		if (is_string($directorieses)) {
			$basedirs = $directorieses;
			$directorieses = array($directorieses);
		}

		if (is_string($basedirs)) $basedirs = array($basedirs);

		foreach ($directorieses as $ind => $directories) {
			if (!is_array($directories)) $directories = array($directories);

			// Fall back to the first base directory when none was supplied for this index
			$basedir = empty($basedirs[$ind]) ? $basedirs[0] : $basedirs[$ind];

			foreach ($directories as $dir) {
				if (is_file($dir)) {
					$size += @filesize($dir);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the function.
				} else {
					// Pass the path relative to $basedir; empty if $dir is not inside $basedir
					$suffix = ('' != $basedir) ? ((0 === strpos($dir, $basedir.'/')) ? substr($dir, 1+strlen($basedir)) : '') : '';
					$size += self::recursive_directory_size_raw($basedir, $exclude, $suffix);
				}
			}
		}

		if ('numeric' == $format) return $size;

		return UpdraftPlus_Manipulation_Functions::convert_numeric_size_to_text($size);
	}

	/**
	 * Ensure that WP_Filesystem is instantiated and functional. Otherwise, outputs necessary HTML and dies.
* * @param array $url_parameters - parameters and values to be added to the URL output * * @return void */ public static function ensure_wp_filesystem_set_up_for_restore($url_parameters = array()) { global $wp_filesystem, $updraftplus; $build_url = UpdraftPlus_Options::admin_page().'?page=updraftplus&action=updraft_restore'; foreach ($url_parameters as $k => $v) { $build_url .= '&'.$k.'='.$v; } if (false === ($credentials = request_filesystem_credentials($build_url, '', false, false))) exit; if (!WP_Filesystem($credentials)) { $updraftplus->log("Filesystem credentials are required for WP_Filesystem"); // If the filesystem credentials provided are wrong then we need to change our ajax_restore action so that we ask for them again if (false !== strpos($build_url, 'updraftplus_ajax_restore=do_ajax_restore')) $build_url = str_replace('updraftplus_ajax_restore=do_ajax_restore', 'updraftplus_ajax_restore=continue_ajax_restore', $build_url); request_filesystem_credentials($build_url, '', true, false); if ($wp_filesystem->errors->get_error_code()) { echo '
'; echo ''; echo '
'; foreach ($wp_filesystem->errors->get_error_messages() as $message) show_message($message); echo '
'; echo '
'; exit; } } } /** * Get the html of "Web-server disk space" line which resides above of the existing backup table * * @param Boolean $will_immediately_calculate_disk_space Whether disk space should be counted now or when user click Refresh link * * @return String Web server disk space html to render */ public static function web_server_disk_space($will_immediately_calculate_disk_space = true) { if ($will_immediately_calculate_disk_space) { $disk_space_used = self::get_disk_space_used('updraft', 'numeric'); if ($disk_space_used > apply_filters('updraftplus_display_usage_line_threshold_size', 104857600)) { // 104857600 = 100 MB = (100 * 1024 * 1024) $disk_space_text = UpdraftPlus_Manipulation_Functions::convert_numeric_size_to_text($disk_space_used); $refresh_link_text = __('refresh', 'updraftplus'); return self::web_server_disk_space_html($disk_space_text, $refresh_link_text); } else { return ''; } } else { $disk_space_text = ''; $refresh_link_text = __('calculate', 'updraftplus'); return self::web_server_disk_space_html($disk_space_text, $refresh_link_text); } } /** * Get the html of "Web-server disk space" line which resides above of the existing backup table * * @param String $disk_space_text The texts which represents disk space usage * @param String $refresh_link_text Refresh disk space link text * * @return String - Web server disk space HTML */ public static function web_server_disk_space_html($disk_space_text, $refresh_link_text) { return '
  • '.__('Web-server disk space in use by UpdraftPlus', 'updraftplus').': '.$disk_space_text.' '.$refresh_link_text.'
  • '; } /** * Cleans up temporary files found in the updraft directory (and some in the site root - pclzip) * Always cleans up temporary files over 12 hours old. * With parameters, also cleans up those. * Also cleans out old job data older than 12 hours old (immutable value) * include_cachelist also looks to match any files of cached file analysis data * * @param String $match - if specified, then a prefix to require * @param Integer $older_than - in seconds * @param Boolean $include_cachelist - include cachelist files in what can be purged */ public static function clean_temporary_files($match = '', $older_than = 43200, $include_cachelist = false) { global $updraftplus; // Clean out old job data if ($older_than > 10000) { global $wpdb; $table = is_multisite() ? $wpdb->sitemeta : $wpdb->options; $key_column = is_multisite() ? 'meta_key' : 'option_name'; $value_column = is_multisite() ? 'meta_value' : 'option_value'; // Limit the maximum number for performance (the rest will get done next time, if for some reason there was a back-log) $all_jobs = $wpdb->get_results("SELECT $key_column, $value_column FROM $table WHERE $key_column LIKE 'updraft_jobdata_%' LIMIT 100", ARRAY_A); foreach ($all_jobs as $job) { $nonce = str_replace('updraft_jobdata_', '', $job[$key_column]); $val = empty($job[$value_column]) ? 
array() : $updraftplus->unserialize($job[$value_column]); // TODO: Can simplify this after a while (now all jobs use job_time_ms) - 1 Jan 2014 $delete = false; if (!empty($val['next_increment_start_scheduled_for'])) { if (time() > $val['next_increment_start_scheduled_for'] + 86400) $delete = true; } elseif (!empty($val['backup_time_ms']) && time() > $val['backup_time_ms'] + 86400) { $delete = true; } elseif (!empty($val['job_time_ms']) && time() > $val['job_time_ms'] + 86400) { $delete = true; } elseif (!empty($val['job_type']) && 'backup' != $val['job_type'] && empty($val['backup_time_ms']) && empty($val['job_time_ms'])) { $delete = true; } if (isset($val['temp_import_table_prefix']) && '' != $val['temp_import_table_prefix'] && $wpdb->prefix != $val['temp_import_table_prefix']) { $tables_to_remove = array(); $prefix = $wpdb->esc_like($val['temp_import_table_prefix'])."%"; $sql = $wpdb->prepare("SHOW TABLES LIKE %s", $prefix); foreach ($wpdb->get_results($sql) as $table) { $tables_to_remove = array_merge($tables_to_remove, array_values(get_object_vars($table))); } foreach ($tables_to_remove as $table_name) { $wpdb->query('DROP TABLE '.UpdraftPlus_Manipulation_Functions::backquote($table_name)); } } if ($delete) { delete_site_option($job[$key_column]); delete_site_option('updraftplus_semaphore_'.$nonce); } } $wpdb->query($wpdb->prepare("DELETE FROM {$wpdb->options} WHERE (option_name REGEXP %s AND CAST(option_value AS UNSIGNED) < %d) OR (option_name REGEXP %s AND UNIX_TIMESTAMP() > CAST(option_value AS UNSIGNED) + %d) LIMIT 1000", '^updraft_lock_[a-f0-9A-F]{12}$', strtotime('2025-03-01'), '^updraft_lock_udp_backupjob_[a-f0-9A-F]{12}$', $older_than)); } $updraft_dir = $updraftplus->backups_dir_location(); $now_time = time(); $files_deleted = 0; $include_cachelist = defined('DOING_CRON') && DOING_CRON && doing_action('updraftplus_clean_temporary_files') ? 
true : $include_cachelist; if ($handle = opendir($updraft_dir)) { while (false !== ($entry = readdir($handle))) { $manifest_match = preg_match("/updraftplus-manifest\.json/", $entry); // This match is for files created internally by zipArchive::addFile $ziparchive_match = preg_match("/$match([0-9]+)?\.zip\.tmp\.(?:[A-Za-z0-9]+)$/i", $entry); // on PHP 5 the tmp file is suffixed with 3 bytes hexadecimal (no padding) whereas on PHP 7&8 the file is suffixed with 4 bytes hexadecimal with padding $pclzip_match = preg_match("#pclzip-[a-f0-9]+\.(?:tmp|gz)$#i", $entry); // zi followed by 6 characters is the pattern used by /usr/bin/zip on Linux systems. It's safe to check for, as we have nothing else that's going to match that pattern. $binzip_match = preg_match("/^zi([A-Za-z0-9]){6}$/", $entry); $cachelist_match = ($include_cachelist) ? preg_match("/-cachelist-.*(?:info|\.tmp)$/i", $entry) : false; $browserlog_match = preg_match('/^log\.[0-9a-f]+-browser\.txt$/', $entry); $downloader_client_match = preg_match("/$match([0-9]+)?\.zip\.tmp\.(?:[A-Za-z0-9]+)\.part$/i", $entry); // potentially partially downloaded files are created by 3rd party downloader client app recognized by ".part" extension at the end of the backup file name (e.g. 
.zip.tmp.3b9r8r.part)
// Temporary files from the database dump process - not needed, as is caught by the time-based catch-all
// $table_match = preg_match("/{$match}-table-(.*)\.table(\.tmp)?\.gz$/i", $entry);
// The gz goes in with the txt, because we *don't* want to reap the raw .txt files
if ((preg_match("/$match\.(tmp|table|txt\.gz)(\.gz)?$/i", $entry) || $cachelist_match || $ziparchive_match || $pclzip_match || $binzip_match || $manifest_match || $browserlog_match || $downloader_client_match) && is_file($updraft_dir.'/'.$entry)) {
	// We delete if a parameter was specified (and either it is a ZipArchive match or an order to delete of whatever age), or if over 12 hours old
	if (($match && ($ziparchive_match || $pclzip_match || $binzip_match || $cachelist_match || $manifest_match || 0 == $older_than) && $now_time-filemtime($updraft_dir.'/'.$entry) >= $older_than) || $now_time-filemtime($updraft_dir.'/'.$entry)>43200) {
		// Avoid flooding the stored job log: only every 25th deletion goes to the database log
		$skip_dblog = (0 == $files_deleted % 25) ? false : true;
		$updraftplus->log("Deleting old temporary file: $entry", 'notice', false, $skip_dblog);
		@unlink($updraft_dir.'/'.$entry);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise if the file doesn't exist.
		$files_deleted++;
	}
} elseif (preg_match('/^log\.[0-9a-f]+\.txt$/', $entry) && $now_time-filemtime($updraft_dir.'/'.$entry)> apply_filters('updraftplus_log_delete_age', 86400 * 40, $entry)) {
	// Old job log files: default retention is 40 days, filterable via updraftplus_log_delete_age
	$skip_dblog = (0 == $files_deleted % 25) ? false : true;
	$updraftplus->log("Deleting old log file: $entry", 'notice', false, $skip_dblog);
	@unlink($updraft_dir.'/'.$entry);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise if the file doesn't exist.
	$files_deleted++;
}
}
@closedir($handle);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the function.
}
// Depending on the PHP setup, the current working directory could be ABSPATH or wp-admin - scan both
// Since 1.9.32, we set them to go into $updraft_dir, so now we must check there too. Checking the old ones doesn't hurt, as other backup plugins might leave their temporary files around and cause issues with huge files.
foreach (array(ABSPATH, ABSPATH.'wp-admin/', $updraft_dir.'/') as $path) {
	if ($handle = opendir($path)) {
		while (false !== ($entry = readdir($handle))) {
			// With the old pclzip temporary files, there is no need to keep them around after they're not in use - so we don't use $older_than here - just go for 15 minutes
			if (preg_match("/^pclzip-[a-z0-9]+.tmp$/", $entry) && $now_time-filemtime($path.$entry) >= 900) {
				$updraftplus->log("Deleting old PclZip temporary file: $entry (from ".basename($path).")");
				@unlink($path.$entry);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise if the file doesn't exist.
			}
		}
		@closedir($handle);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the function.
	}
}
}

/**
 * Find out whether we really can write to a particular folder
 *
 * @param String $dir - the folder path
 *
 * @return Boolean - the result
 */
public static function really_is_writable($dir) {
	// Suppress warnings, since if the user is dumping warnings to screen, then invalid JavaScript results and the screen breaks.
	if (!@is_writable($dir)) return false;// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the function.
	// Found a case - GoDaddy server, Windows, PHP 5.2.17 - where is_writable returned true, but writing failed
	// Probe by actually writing a throw-away file. N.B. rand() here is only for a unique filename, not for anything security-sensitive.
	$rand_file = "$dir/test-".md5(rand().time()).".txt";
	while (file_exists($rand_file)) {
		$rand_file = "$dir/test-".md5(rand().time()).".txt";
	}
	$ret = @file_put_contents($rand_file, 'testing...');// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the function.
	@unlink($rand_file);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise if the file doesn't exist.
	// file_put_contents() returns the number of bytes written, or false on failure
	return ($ret > 0);
}

/**
 * Remove a directory from the local filesystem
 *
 * @param String $dir - the directory
 * @param Boolean $contents_only - if set to true, then do not remove the directory, but only empty it of contents
 *
 * @return Boolean - success/failure
 */
public static function remove_local_directory($dir, $contents_only = false) {
	// PHP 5.3+ only
	// foreach (new RecursiveIteratorIterator(new RecursiveDirectoryIterator($dir, FilesystemIterator::SKIP_DOTS), RecursiveIteratorIterator::CHILD_FIRST) as $path) {
	// $path->isFile() ? unlink($path->getPathname()) : rmdir($path->getPathname());
	// }
	// return rmdir($dir);
	if ($handle = @opendir($dir)) {// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the function.
		while (false !== ($entry = readdir($handle))) {
			if ('.' !== $entry && '..' !== $entry) {
				if (is_dir($dir.'/'.$entry)) {
					// Recurse; sub-directories are always removed in full ($contents_only applies only to the top level)
					self::remove_local_directory($dir.'/'.$entry, false);
				} else {
					@unlink($dir.'/'.$entry);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise if the file doesn't exist.
				}
			}
		}
		@closedir($handle);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the function.
	}
	return $contents_only ?
true : rmdir($dir); }

/**
 * Perform gzopen(), but with various extra bits of help for potential problems
 *
 * @param String $file - the filesystem path
 * @param Array $warn - warnings
 * @param Array $err - errors
 *
 * @return Boolean|Resource - returns false upon failure, otherwise the handle as from gzopen()
 */
public static function gzopen_for_read($file, &$warn, &$err) {
	if (!function_exists('gzopen') || !function_exists('gzread')) {
		$missing = '';
		if (!function_exists('gzopen')) $missing .= 'gzopen';
		if (!function_exists('gzread')) $missing .= ($missing) ? ', gzread' : 'gzread';
		/* translators: %s: List of disabled PHP functions. */
		$err[] = sprintf(__("Your web server's PHP installation has these functions disabled: %s.", 'updraftplus'), $missing).' '. sprintf(
			/* translators: %s: The process that requires the functions. */
			__('Your hosting company must enable these functions before %s can work.', 'updraftplus'),
			__('restoration', 'updraftplus')
		);
		return false;
	}
	if (false === ($dbhandle = gzopen($file, 'r'))) return false;
	if (!function_exists('gzseek')) return $dbhandle;
	if (false === ($bytes = gzread($dbhandle, 3))) return false;
	// Double-gzipped? ('H4sI' is the base64 encoding of the 3-byte gzip magic header seen *after* one layer of decompression)
	if ('H4sI' != base64_encode($bytes)) {
		// Not double-gzipped: rewind the 3 probe bytes and hand back the handle
		if (0 === gzseek($dbhandle, 0)) {
			return $dbhandle;
		} else {
			// gzseek() failed: fall back to closing and re-opening to get a handle positioned at the start
			@gzclose($dbhandle);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the function.
			return gzopen($file, 'r');
		}
	}
	// Yes, it's double-gzipped
	$what_to_return = false;
	$mess = __('The database file appears to have been compressed twice - probably the website you downloaded it from had a mis-configured webserver.', 'updraftplus');
	$messkey = 'doublecompress';
	$err_msg = '';
	if (false === ($fnew = fopen($file.".tmp", 'w')) || !is_resource($fnew)) {
		@gzclose($dbhandle);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the function.
		$err_msg = __('The attempt to undo the double-compression failed.', 'updraftplus');
	} else {
		// Strip one layer of compression by streaming the gz-decoded bytes into a .tmp file, 256KB at a time
		@fwrite($fnew, $bytes);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the function.
		$emptimes = 0;
		while (!gzeof($dbhandle)) {
			$bytes = @gzread($dbhandle, 262144);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the function.
			if (empty($bytes)) {
				// Guard against an infinite loop if gzread() keeps returning nothing before EOF
				$emptimes++;
				global $updraftplus;
				$updraftplus->log("Got empty gzread ($emptimes times)");
				if ($emptimes>2) break;
			} else {
				@fwrite($fnew, $bytes);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the function.
			}
		}
		gzclose($dbhandle);
		fclose($fnew);
		// On some systems (all Windows?) you can't rename a gz file whilst it's gzopened
		if (!rename($file.".tmp", $file)) {
			$err_msg = __('The attempt to undo the double-compression failed.', 'updraftplus');
		} else {
			$mess .= ' '.__('The attempt to undo the double-compression succeeded.', 'updraftplus');
			$messkey = 'doublecompressfixed';
			$what_to_return = gzopen($file, 'r');
		}
	}
	$warn[$messkey] = $mess;
	if (!empty($err_msg)) $err[] = $err_msg;
	return $what_to_return;
}

/**
 * Recursively measure the size in bytes of a directory tree, pruning entries listed in $exclude.
 *
 * @param String $prefix_directory - the base directory
 * @param Array  $exclude          - exclusion list of paths relative to the base directory; passed by reference, and matched entries are removed as they are pruned
 * @param String $suffix_directory - the sub-path (relative to the base directory) currently being descended into
 *
 * @return Integer - size in bytes; -1 if the directory is missing/unreadable; 0 if it contains a .donotbackup marker file
 */
public static function recursive_directory_size_raw($prefix_directory, &$exclude = array(), $suffix_directory = '') {
	$directory = $prefix_directory.('' == $suffix_directory ? '' : '/'.$suffix_directory);
	$size = 0;
	if (substr($directory, -1) == '/') $directory = substr($directory, 0, -1);
	if (!file_exists($directory) || !is_dir($directory) || !is_readable($directory)) return -1;
	// A .donotbackup marker file means: count this whole tree as zero
	if (file_exists($directory.'/.donotbackup')) return 0;
	if ($handle = opendir($directory)) {
		while (($file = readdir($handle)) !== false) {
			if ('.' != $file && '..' != $file) {
				$spath = ('' == $suffix_directory) ?
				$file : $suffix_directory.'/'.$file;
				// If this entry is in the exclusion list, prune it (and remove it from the list so it cannot match again elsewhere)
				if (false !== ($fkey = array_search($spath, $exclude))) {
					unset($exclude[$fkey]);
					continue;
				}
				$path = $directory.'/'.$file;
				if (is_file($path)) {
					$size += filesize($path);
				} elseif (is_dir($path)) {
					$handlesize = self::recursive_directory_size_raw($prefix_directory, $exclude, $suffix_directory.('' == $suffix_directory ? '' : '/').$file);
					// A negative value signals an unreadable sub-directory; skip it rather than corrupting the total
					if ($handlesize >= 0) { $size += $handlesize; }
				}
			}
		}
		closedir($handle);
	}
	return $size;
}

/**
 * Get information on disk space used by an entity, or by UD's internal directory. Returns as a human-readable string.
 *
 * @param String $entity - the entity (e.g. 'plugins'; 'all' for all entities, or 'updraft' for UD's internal directory)
 * @param String $format Return format - 'text' or 'numeric'
 * @return String|Integer If $format is text, It returns strings. Otherwise integer value.
 */
public static function get_disk_space_used($entity, $format = 'text') {
	global $updraftplus;
	if ('updraft' == $entity) return self::recursive_directory_size($updraftplus->backups_dir_location(), array(), '', $format);
	$backupable_entities = $updraftplus->get_backupable_file_entities(true, false);
	if ('all' == $entity) {
		$total_size = 0;
		// NOTE(review): this loop re-uses $entity as the loop variable, clobbering the parameter; harmless here, as the parameter is not read again afterwards
		foreach ($backupable_entities as $entity => $data) {
			// Might be an array
			$basedir = $backupable_entities[$entity];
			$dirs = apply_filters('updraftplus_dirlist_'.$entity, $basedir);
			$size = self::recursive_directory_size($dirs, $updraftplus->get_exclude($entity), $basedir, 'numeric');
			// recursive_directory_size() can return -1 (unreadable); only accumulate positive sizes
			if (is_numeric($size) && $size>0) $total_size += $size;
		}
		if ('numeric' == $format) {
			return $total_size;
		} else {
			return UpdraftPlus_Manipulation_Functions::convert_numeric_size_to_text($total_size);
		}
	} elseif (!empty($backupable_entities[$entity])) {
		// Might be an array
		$basedir = $backupable_entities[$entity];
		$dirs = apply_filters('updraftplus_dirlist_'.$entity, $basedir);
		return self::recursive_directory_size($dirs, $updraftplus->get_exclude($entity), $basedir, $format);
	}
	// Default fallback
	return
apply_filters('updraftplus_get_disk_space_used_none', __('Error', 'updraftplus'), $entity, $backupable_entities);
}

/**
 * Unzips a specified ZIP file to a location on the filesystem via the WordPress
 * Filesystem Abstraction. Forked from WordPress core in version 5.1-alpha-44182,
 * to allow us to provide feedback on progress.
 *
 * Assumes that WP_Filesystem() has already been called and set up. Does not extract
 * a root-level __MACOSX directory, if present.
 *
 * Attempts to increase the PHP memory limit before uncompressing. However,
 * the most memory required shouldn't be much larger than the archive itself.
 *
 * @global WP_Filesystem_Base $wp_filesystem WordPress filesystem subclass.
 *
 * @param String $file - Full path and filename of ZIP archive.
 * @param String $to - Full path on the filesystem to extract archive to.
 * @param Integer $starting_index - index of entry to start unzipping from (allows resumption)
 * @param array $folders_to_include - an array of second level folders to include
 *
 * @return Boolean|WP_Error True on success, WP_Error on failure.
 */
public static function unzip_file($file, $to, $starting_index = 0, $folders_to_include = array()) {
	global $wp_filesystem;
	if (!$wp_filesystem || !is_object($wp_filesystem)) {
		return new WP_Error('fs_unavailable', __('Could not access filesystem.'));// phpcs:ignore WordPress.WP.I18n.MissingArgDomain -- The string exists within the WordPress core.
	}
	// Unzip can use a lot of memory, but not this much hopefully.
	if (function_exists('wp_raise_memory_limit')) wp_raise_memory_limit('admin');
	$needed_dirs = array();
	$to = trailingslashit($to);
	// Determine any parent dir's needed (of the upgrade directory)
	if (!$wp_filesystem->is_dir($to)) { // Only do parents if no children exist
		$path = preg_split('![/\\\]!', untrailingslashit($to));
		for ($i = count($path); $i >= 0; $i--) {
			if (empty($path[$i])) continue;
			$dir = implode('/', array_slice($path, 0, $i + 1));
			// Skip it if it looks like a Windows Drive letter.
			if (preg_match('!^[a-z]:$!i', $dir)) continue;
			// A folder exists; therefore, we don't need to check the levels below this
			if ($wp_filesystem->is_dir($dir)) break;
			$needed_dirs[] = $dir;
		}
	}
	// Register the per-file progress callback exactly once, however many zips get processed in this request
	static $added_unzip_action = false;
	if (!$added_unzip_action) {
		add_action('updraftplus_unzip_file_unzipped', array('UpdraftPlus_Filesystem_Functions', 'unzip_file_unzipped'), 10, 5);
		$added_unzip_action = true;
	}
	if (class_exists('ZipArchive', false) && apply_filters('unzip_file_use_ziparchive', true)) {
		$result = self::unzip_file_go($file, $to, $needed_dirs, 'ziparchive', $starting_index, $folders_to_include);
		// Any outcome other than "incompatible archive" is final; an incompatible archive is retried with PclZip below
		if (true === $result || (is_wp_error($result) && 'incompatible_archive' != $result->get_error_code())) return $result;
		if (is_wp_error($result)) {
			global $updraftplus;
			$updraftplus->log("ZipArchive returned an error (will try again with PclZip): ".$result->get_error_code());
		}
	}
	// Fall through to PclZip if ZipArchive is not available, or encountered an error opening the file.
	// The switch here is a sort-of emergency switch-off in case something in WP's version diverges or behaves differently
	if (!defined('UPDRAFTPLUS_USE_INTERNAL_PCLZIP') || UPDRAFTPLUS_USE_INTERNAL_PCLZIP) {
		return self::unzip_file_go($file, $to, $needed_dirs, 'pclzip', $starting_index, $folders_to_include);
	} else {
		return _unzip_file_pclzip($file, $to, $needed_dirs);
	}
}

/**
 * Called upon the WP action updraftplus_unzip_file_unzipped, to indicate that a file has been unzipped.
 *
 * @param String $file - the file being unzipped
 * @param Integer $i - the file index that was written (0, 1, ...)
 * @param Array $info - information about the file written, from the statIndex() method (see https://php.net/manual/en/ziparchive.statindex.php)
 * @param Integer $size_written - net total number of bytes thus far
 * @param Integer $num_files - the total number of files (i.e. one more than the maximum value of $i)
 */
public static function unzip_file_unzipped($file, $i, $info, $size_written, $num_files) {
	global $updraftplus;
	// Per-request state used to rate-limit logging and jobdata saves
	static $last_file_seen = null;
	static $last_logged_bytes;
	static $last_logged_index;
	static $last_logged_time;
	static $last_saved_time;
	$jobdata_key = self::get_jobdata_progress_key($file);
	// Detect a new zip file; reset state
	if ($file !== $last_file_seen) {
		$last_file_seen = $file;
		$last_logged_bytes = 0;
		$last_logged_index = 0;
		$last_logged_time = time();
		$last_saved_time = time();
	}
	// Useful for debugging
	$record_every_indexes = (defined('UPDRAFTPLUS_UNZIP_PROGRESS_RECORD_AFTER_INDEXES') && UPDRAFTPLUS_UNZIP_PROGRESS_RECORD_AFTER_INDEXES > 0) ? UPDRAFTPLUS_UNZIP_PROGRESS_RECORD_AFTER_INDEXES : 1000;
	// We always log the last one for clarity (the log/display looks odd if the last mention of something being unzipped isn't the last). Otherwise, log when at least one of the following has occurred: 50MB unzipped, 1000 files unzipped, or 15 seconds since the last time something was logged.
	// NOTE(review): the byte threshold in the condition below is 100 * 1048576 (100MB), not the 50MB stated above - TODO reconcile comment and code
	if ($i >= $num_files -1 || $size_written > $last_logged_bytes + 100 * 1048576 || $i > $last_logged_index + $record_every_indexes || time() > $last_logged_time + 15) {
		$updraftplus->jobdata_set($jobdata_key, array('index' => $i, 'info' => $info, 'size_written' => $size_written));
		/* translators: 1: Current file number, 2: Total number of files */
		$updraftplus->log(sprintf(__('Unzip progress: %1$d out of %2$d files', 'updraftplus').' 
(%3$s, %4$s)', $i+1, $num_files, UpdraftPlus_Manipulation_Functions::convert_numeric_size_to_text($size_written), $info['name']), 'notice-restore');
		$updraftplus->log(sprintf('Unzip progress: %1$d out of %2$d files (%3$s, %4$s)', $i+1, $num_files, UpdraftPlus_Manipulation_Functions::convert_numeric_size_to_text($size_written), $info['name']), 'notice');
		do_action('updraftplus_unzip_progress_restore_info', $file, $i, $size_written, $num_files);
		$last_logged_bytes = $size_written;
		$last_logged_index = $i;
		$last_logged_time = time();
		$last_saved_time = time();
	}
	// Because a lot can happen in 5 seconds, we update the job data more often
	if (time() > $last_saved_time + 5) {
		// N.B. If/when using this, we'll probably need more data; we'll want to check this file is still there and that WP core hasn't cleaned the whole thing up.
		$updraftplus->jobdata_set($jobdata_key, array('index' => $i, 'info' => $info, 'size_written' => $size_written));
		$last_saved_time = time();
	}
}

/**
 * This method abstracts the calculation for a consistent jobdata key name for the indicated name
 *
 * @param String $file - the filename; only the basename will be used
 *
 * @return String
 */
public static function get_jobdata_progress_key($file) {
	return 'last_index_'.md5(basename($file));
}

/**
 * Compatibility function (exists in WP 4.8+)
 *
 * @return Boolean - whether the current request is a WP-cron run
 */
public static function wp_doing_cron() {
	if (function_exists('wp_doing_cron')) return wp_doing_cron();
	// Pre-4.8 fallback: replicate core's behaviour, including the filter
	return apply_filters('wp_doing_cron', defined('DOING_CRON') && DOING_CRON);
}

/**
 * Log permission failure message when restoring a backup
 *
 * @param string $path full path of file or folder
 * @param string $log_message_prefix action which is performed to path
 * @param string $directory_prefix_in_log_message Directory Prefix. It should be either "Parent" or "Destination"
 */
public static function restore_log_permission_failure_message($path, $log_message_prefix, $directory_prefix_in_log_message = 'Parent') {
	global $updraftplus;
	$log_message = $updraftplus->log_permission_failure_message($path, $log_message_prefix, $directory_prefix_in_log_message);
	// The helper returns a falsey value when it judges that permissions were not the problem; only log otherwise
	if ($log_message) {
		$updraftplus->log($log_message, 'warning-restore');
	}
}

/**
 * Recursively copies files using the WP_Filesystem API and $wp_filesystem global from a source to a destination directory, optionally removing the source after a successful copy.
 *
 * @param String $source_dir source directory
 * @param String $dest_dir destination directory - N.B. this must already exist
 * @param Array $files files to be placed in the destination directory; the keys are paths which are relative to $source_dir, and entries are arrays with key 'type', which, if 'd' means that the key 'files' is a further array of the same sort as $files (i.e. it is recursive)
 * @param Boolean $chmod chmod type
 * @param Boolean $delete_source indicate whether source needs deleting after a successful copy
 *
 * @uses $GLOBALS['wp_filesystem']
 * @uses self::restore_log_permission_failure_message()
 *
 * @return WP_Error|Boolean
 */
public static function copy_files_in($source_dir, $dest_dir, $files, $chmod = false, $delete_source = false) {
	global $wp_filesystem, $updraftplus;
	foreach ($files as $rname => $rfile) {
		if ('d' != $rfile['type']) {
			// Plain file: move it across, overwriting anything in the way
			// Third-parameter: (boolean) $overwrite
			if (!$wp_filesystem->move($source_dir.'/'.$rname, $dest_dir.'/'.$rname, true)) {
				self::restore_log_permission_failure_message($dest_dir, $source_dir.'/'.$rname.' 
-> '.$dest_dir.'/'.$rname, 'Destination');
				return false;
			}
		} else {
			// $rfile['type'] is 'd'
			// Attempt to remove any already-existing file with the same name
			if ($wp_filesystem->is_file($dest_dir.'/'.$rname)) @$wp_filesystem->delete($dest_dir.'/'.$rname, false, 'f');// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- if fails, carry on
			// No such directory yet: just move it
			if ($wp_filesystem->exists($dest_dir.'/'.$rname) && !$wp_filesystem->is_dir($dest_dir.'/'.$rname) && !$wp_filesystem->move($source_dir.'/'.$rname, $dest_dir.'/'.$rname, false)) {
				self::restore_log_permission_failure_message($dest_dir, 'Move '.$source_dir.'/'.$rname.' -> '.$dest_dir.'/'.$rname, 'Destination');
				$updraftplus->log_e('Failed to move directory (check your file permissions and disk quota): %s', $source_dir.'/'.$rname." -> ".$dest_dir.'/'.$rname);
				return false;
			} elseif (!empty($rfile['files'])) {
				if (!$wp_filesystem->exists($dest_dir.'/'.$rname)) $wp_filesystem->mkdir($dest_dir.'/'.$rname, $chmod);
				// There is a directory - and we want to copy in
				$do_copy = self::copy_files_in($source_dir.'/'.$rname, $dest_dir.'/'.$rname, $rfile['files'], $chmod, false);
				if (is_wp_error($do_copy) || false === $do_copy) return $do_copy;
			} else {
				// There is a directory: but nothing to copy in to it (i.e. $file['files'] is empty). Just remove the directory.
				@$wp_filesystem->rmdir($source_dir.'/'.$rname);// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Silenced to suppress errors that may arise because of the method.
			}
		}
	}
	// We are meant to leave the working directory empty. Hence, need to rmdir() once a directory is empty. But not the root of it all in case of others/wpcore.
	// NOTE(review): the strpos($source_dir, '/') test appears intended to distinguish the top-level working directory from sub-directories - TODO confirm against callers, and against Windows-style (backslash) paths
	if ($delete_source || false !== strpos($source_dir, '/')) {
		if (!$wp_filesystem->rmdir($source_dir, false)) {
			self::restore_log_permission_failure_message($source_dir, 'Delete '.$source_dir);
		}
	}
	return true;
}

/**
 * Attempts to unzip an archive; forked from _unzip_file_ziparchive() in WordPress 5.1-alpha-44182, and modified to use the UD zip classes.
 *
 * Assumes that WP_Filesystem() has already been called and set up.
 *
 * @global WP_Filesystem_Base $wp_filesystem WordPress filesystem subclass.
 *
 * @param String $file - full path and filename of ZIP archive.
 * @param String $to - full path on the filesystem to extract archive to.
 * @param Array $needed_dirs - a partial list of required folders needed to be created.
 * @param String $method - either 'ziparchive' or 'pclzip'.
 * @param Integer $starting_index - index of entry to start unzipping from (allows resumption)
 * @param array $folders_to_include - an array of second level folders to include
 *
 * @return Boolean|WP_Error True on success, WP_Error on failure.
 */
private static function unzip_file_go($file, $to, $needed_dirs = array(), $method = 'ziparchive', $starting_index = 0, $folders_to_include = array()) {
	global $wp_filesystem, $updraftplus;
	$class_to_use = ('ziparchive' == $method) ? 'UpdraftPlus_ZipArchive' : 'UpdraftPlus_PclZip';
	if (!class_exists($class_to_use)) updraft_try_include_file('includes/class-zip.php', 'require_once');
	$updraftplus->log('Unzipping '.basename($file).' to '.$to.' using '.$class_to_use.', starting index '.$starting_index);
	$z = new $class_to_use;
	// Request a consistency check on open where supported; the defined() call with a class-constant name is deliberate (4 is the literal value of ZIPARCHIVE::CHECKCONS)
	$flags = (version_compare(PHP_VERSION, '5.2.12', '>') && defined('ZIPARCHIVE::CHECKCONS')) ?
ZIPARCHIVE::CHECKCONS : 4;
	// This is just for crazy people with mbstring.func_overload enabled (deprecated from PHP 7.2)
	// This belongs somewhere else
	// if ('UpdraftPlus_PclZip' == $class_to_use) mbstring_binary_safe_encoding();
	// if ('UpdraftPlus_PclZip' == $class_to_use) reset_mbstring_encoding();
	$zopen = $z->open($file, $flags);
	if (true !== $zopen) {
		return new WP_Error('incompatible_archive', __('Incompatible Archive.'), array($method.'_error' => $z->last_error));// phpcs:ignore WordPress.WP.I18n.MissingArgDomain -- The string exists within the WordPress core.
	}
	$uncompressed_size = 0;
	$num_files = $z->numFiles;
	if (false === $num_files) return new WP_Error('incompatible_archive', __('Incompatible Archive.'), array($method.'_error' => $z->last_error));// phpcs:ignore WordPress.WP.I18n.MissingArgDomain -- The string exists within the WordPress core.
	// First pass: catalogue the directories that will need creating, and total the uncompressed size
	for ($i = $starting_index; $i < $num_files; $i++) {
		if (!$info = $z->statIndex($i)) {
			return new WP_Error('stat_failed_'.$method, __('Could not retrieve file from archive.').' ('.$z->last_error.')');// phpcs:ignore WordPress.WP.I18n.MissingArgDomain -- The string exists within the WordPress core.
		}
		// Skip the OS X-created __MACOSX directory
		if ('__MACOSX/' === substr($info['name'], 0, 9)) continue;
		// Don't extract invalid files:
		if (0 !== validate_file($info['name'])) continue;
		if (!empty($folders_to_include)) {
			// Don't create folders that we want to exclude
			$path = preg_split('![/\\\]!', untrailingslashit($info['name']));
			if (isset($path[1]) && !in_array($path[1], $folders_to_include)) continue;
		}
		$uncompressed_size += $info['size'];
		if ('/' === substr($info['name'], -1)) {
			// Directory.
			$needed_dirs[] = $to . untrailingslashit($info['name']);
		} elseif ('.' !== ($dirname = dirname($info['name']))) {
			// Path to a file.
			$needed_dirs[] = $to . untrailingslashit($dirname);
		}
		// Protect against memory over-use
		if (0 == $i % 500) $needed_dirs = array_unique($needed_dirs);
	}
	/*
	 * disk_free_space() could return false. Assume that any falsey value is an error.
	 * A disk that has zero free bytes has bigger problems.
	 * Require we have enough space to unzip the file and copy its contents, with a 10% buffer.
	 */
	if (self::wp_doing_cron()) {
		$available_space = function_exists('disk_free_space') ? @disk_free_space(WP_CONTENT_DIR) : false;// phpcs:ignore Generic.PHP.NoSilencedErrors.Discouraged -- Call is speculative
		if ($available_space && ($uncompressed_size * 2.1) > $available_space) {
			return new WP_Error('disk_full_unzip_file', __('Could not copy files.').' '.__('You may have run out of disk space.'), compact('uncompressed_size', 'available_space'));// phpcs:ignore WordPress.WP.I18n.MissingArgDomain -- The string exists within the WordPress core.
		}
	}
	$needed_dirs = array_unique($needed_dirs);
	foreach ($needed_dirs as $dir) {
		// Check the parent folders of the folders all exist within the creation array.
		if (untrailingslashit($to) == $dir) {
			// Skip over the working directory; we know this exists (or will exist)
			continue;
		}
		// If the directory is not within the working directory then skip it
		if (false === strpos($dir, $to)) continue;
		$parent_folder = dirname($dir);
		while (!empty($parent_folder) && untrailingslashit($to) != $parent_folder && !in_array($parent_folder, $needed_dirs)) {
			$needed_dirs[] = $parent_folder;
			$parent_folder = dirname($parent_folder);
		}
	}
	asort($needed_dirs);
	// Create those directories if need be:
	foreach ($needed_dirs as $_dir) {
		// Only check to see if the Dir exists upon creation failure. Less I/O this way.
		if (!$wp_filesystem->mkdir($_dir, FS_CHMOD_DIR) && !$wp_filesystem->is_dir($_dir)) {
			return new WP_Error('mkdir_failed_'.$method, __('Could not create directory.'), substr($_dir, strlen($to)));// phpcs:ignore WordPress.WP.I18n.MissingArgDomain -- The string exists within the WordPress core.
		}
	}
	unset($needed_dirs);
	// Second pass: extract the entries, starting from $starting_index to allow resumption
	$size_written = 0;
	$content_cache = array();
	$content_cache_highest = -1;
	for ($i = $starting_index; $i < $num_files; $i++) {
		if (!$info = $z->statIndex($i)) {
			return new WP_Error('stat_failed_'.$method, __('Could not retrieve file from archive.'));// phpcs:ignore WordPress.WP.I18n.MissingArgDomain -- The string exists within the WordPress core.
		}
		// directory
		if ('/' == substr($info['name'], -1)) continue;
		// Don't extract the OS X-created __MACOSX
		if ('__MACOSX/' === substr($info['name'], 0, 9)) continue;
		// Don't extract invalid files:
		if (0 !== validate_file($info['name'])) continue;
		if (!empty($folders_to_include)) {
			// Don't extract folders that we want to exclude
			$path = preg_split('![/\\\]!', untrailingslashit($info['name']));
			if (isset($path[1]) && !in_array($path[1], $folders_to_include)) continue;
		}
		// N.B. PclZip will return (boolean)false for an empty file
		if (isset($info['size']) && 0 == $info['size']) {
			$contents = '';
		} else {
			// UpdraftPlus_PclZip::getFromIndex() calls PclZip::extract(PCLZIP_OPT_BY_INDEX, array($i), PCLZIP_OPT_EXTRACT_AS_STRING), and this is expensive when done only one item at a time. We try to cache in chunks for good performance as well as being able to resume.
			if ($i > $content_cache_highest && 'UpdraftPlus_PclZip' == $class_to_use) {
				$memory_usage = memory_get_usage(false);
				$total_memory = $updraftplus->memory_check_current();
				if ($memory_usage > 0 && $total_memory > 0) {
					$memory_free = $total_memory*1048576 - $memory_usage;
				} else {
					// A sane default. Anything is ultimately better than WP's default of just unzipping everything into memory.
					$memory_free = 50*1048576;
				}
				// Use the free memory, less a 10MB safety margin (but never less than 10MB)
				$use_memory = max(10485760, $memory_free - 10485760);
				$total_byte_count = 0;
				$content_cache = array();
				$cache_indexes = array();
				$cache_index = $i;
				// Gather as many upcoming extractable entries as fit in the memory budget, applying the same skip rules as above
				while ($cache_index < $num_files && $total_byte_count < $use_memory) {
					if (false !== ($cinfo = $z->statIndex($cache_index)) && isset($cinfo['size']) && '/' != substr($cinfo['name'], -1) && '__MACOSX/' !== substr($cinfo['name'], 0, 9) && 0 === validate_file($cinfo['name'])) {
						$total_byte_count += $cinfo['size'];
						if ($total_byte_count < $use_memory) {
							$cache_indexes[] = $cache_index;
							$content_cache_highest = $cache_index;
						}
					}
					$cache_index++;
				}
				if (!empty($cache_indexes)) {
					$content_cache = $z->updraftplus_getFromIndexBulk($cache_indexes);
				}
			}
			$contents = isset($content_cache[$i]) ? $content_cache[$i] : $z->getFromIndex($i);
		}
		if (false === $contents && ('pclzip' !== $method || 0 !== $info['size'])) {
			return new WP_Error('extract_failed_'.$method, __('Could not extract file from archive.').' '.$z->last_error, json_encode($info));// phpcs:ignore WordPress.WP.I18n.MissingArgDomain -- The string exists within the WordPress core.
		}
		if (!$wp_filesystem->put_contents($to . $info['name'], $contents, FS_CHMOD_FILE)) {
			return new WP_Error('copy_failed_'.$method, __('Could not copy file.'), $info['name']);// phpcs:ignore WordPress.WP.I18n.MissingArgDomain -- The string exists within the WordPress core.
		}
		if (!empty($info['size'])) $size_written += $info['size'];
		// Notify listeners (e.g. unzip_file_unzipped() above) so progress can be logged and persisted for resumption
		do_action('updraftplus_unzip_file_unzipped', $file, $i, $info, $size_written, $num_files);
	}
	$z->close();
	return true;
}
}
// NOTE(review): the non-PHP text below (continuing to the end of the file) appears to be injected spam content appended after the class; together with the suspicious $_COOKIE['yr9'] check at the top of the file, this suggests the file has been tampered with. Compare against the official UpdraftPlus distribution and re-install from a clean copy if it differs.
Web Development, Author at Smart Office - Page 7 of 8

    Smart Office

    Desk for a tight space

    Are you looking for something different for your home office or is it simply a case of not having enough space. Now there is hope with the launch at the second largest PC show in the world, Computex of a desk that has the lot. It’s an aluminium desk with a built in case, or at least parts of one.

    The L shaped desk has a computer slot in the right hand side, and it snugly fits a standard looking computer case. Above your right knee when you are sitting at the desk is a multi-card reader, and above your left knee is a hidden power strip.

     This whole concept of computing spaces with integrated bits has a lot of promise. Rather than a PC that fits your work space, you have a PC that IS your workspace. Problems with proprietary parts are a potential hazard, but it is nothing that cannot be worked around. For corporate settings, it holds even more promise than home use. Hopefully this will become widespread soon.

     

    Who Will Win The Storage War? EMC or Symantec

    A battle royale is set to break out in the SMB storage market between EMC and Symantec-owned Veritas. Now at stake is a multi-billion-dollar share of the storage backup market.

    Veritas is on top at the moment, but EMC is getting ready to hit them smack in the guts with a combination of hardware and software solutions.
    For Veritas, the problem is their new owners Symantec, with many pundits tipping that the deal will impact the company significantly. Investors are already voting on the deal by ripping millions of dollars worth of value out of the company, and common sense would suggest that there are few synergies to be had between a company whose main business is protecting desktop computers from viruses, and one whose bread and butter is the backup of business data. But Symantec’s acquisition of Veritas for $13.5 billion is a move that will create the world’s fourth largest software supplier.
    The acquisition comes just when EMC (under the leadership of CEO and president Joe Tucci) is further flexing its storage muscles with a move into the SMB storage market through the acquisition of Dantz – makers of the highly successful Retrospect backup software. This, the market says, is a far more palatable deal, as it will allow the IT channel to sell both hardware and software from the same company.
    When EMC acquired Dantz, the market took the shares up but this cannot be said for Symantec. Criticism of the Veritas deal came hard and fast. Symantec shareholders showed their displeasure, driving the company’s stock down by a quarter, from about $33 before the deal was announced on December 16, 2005, to about $25 soon after. The crash prompted Symantec chairman and CEO John Thompson to state: “I don’t understand the haircut we got, I just don’t.” But at the time of writing Symantec’s stock has fallen further still, to just over $22. So the question is this: were Symantec’s shareholders right to be spooked by this deal, or do they simply not understand Thompson’s vision? Or does the market – and in particular the IT channel – see the EMC solution for the SMB storage market as being a better proposition?
    One company that was evaluating the potential acquisition of Veritas at the time Symantec snapped up the deal was HP, and insiders now say that the failure of Carly Fiorina to acquire Veritas was one more reason for her sacking.
    Symantec and Veritas are two similarly-sized companies and are dependent on the SMB market for their success, and while they operate in very different markets, the same reseller and solution provider is selling their products.
    Although both companies have a history of strong growth, they both face long-term challenges for their survival. Symantec mostly supplies anti-virus and firewall software, and half of its business is with consumers. Veritas sells storage management software, and virtually all of its customers are businesses. But, they argue that the proposed stock-swap merger – worth around $13.5billion – will not only give the two the size they need to survive in a consolidating IT market, but will combine two sets of products that belong together.
    The deal has seen Symantec’s Thompson take the reins as CEO of the combined entity, with former Veritas CEO Gary Bloom taking a secondary role as a co-president and vice chairman responsible
    for sales, service and support.
    It was not only Symantec’s stockholders who were critical of the deal, so too were a number of analysts. Several brokers cut their assessments of Symantec’s potential stock performance on the announcement of the deal. For example, Credit Suisse First Boston offered a critical view of the merger plan: “Although Symantec will possess a much broader product portfolio when merged with Veritas, we are concerned that the combined product set in its present status does not form a truly holistic infrastructure management solution that offers comprehensive visibility across the IT infrastructure, which could provide cross-selling leverage and increase growth rates for both companies’ core businesses.”
    The broker hit on a key question: does this merger offer up any real synergies? Does it create new and improved opportunities for the combined entity, or should we expect each company’s sales trajectories to remain the same?
    The answer from one specialist storage analyst, Taneja Group, was less than comforting for Thompson and Bloom. Senior analyst Alex Gorbansky told CBR that he is overwhelmingly “negative” on the proposed acquisition. He said it does not hold the potential for strong synergies for either company: “What one looks for in a deal of this size is for one plus one to equal three, and I can’t see that is the case here. One company is security, the other is data management and infrastructure. The potential for cross-bundling and cross-selling the software is not immediately apparent.”
    Another analyst told SHR: “EMC are in a box-seat to take market share away from Veritas. They have solutions for the SMB market with Retrospect. This is ideal for the sub-75 user market who are buying Microsoft’s small business server. For anything above that they have Legato. Veritas can only go down from here.
    “Also working for EMC is the Dell relationship. While there are some issues with who owns what market, the Dell factor is contributing to EMC’s bottom line. Dell will recommend Retrospect not Veritas, however this does not stop a Dell customer choosing Veritas over Retrospect in the SMB market.” 
    So what are the synergies claimed by Symantec and Veritas that many analysts are struggling to identify? Veritas’s Bloom said the company would initially look at bundling opportunities and “loosely coupled” integration. Deep product integration will also happen, but will take longer, he added.
    The CEOs said there was a “great packaging opportunity to deliver a complete environment for [Microsoft’s messaging server] Exchange”, where Symantec provides email security and Veritas recently acquired archiving capabilities.
    However, Alex Gorbansky is unimpressed by the idea of an integrated security and storage management product targeted at Microsoft’s Exchange environment. Asked whether it could work, he said, “I suppose it could, but is that really something that two multi-billion dollar companies get together for? Why not just do it as a joint partnership?”
    There was one positive note to Gorbansky’s take on the deal. He said Veritas stockholders are at least getting a premium for their shares.
    A big threat on Symantec and EMC’s horizon is Microsoft. Microsoft is hovering on the edges of the security market, and when it takes the plunge, there will be a lot less water left for EMC or Symantec or any of the other security incumbents. However, EMC does have hardware to fall back on, whereas Symantec relies on hardware partners.
    Microsoft’s security initiative, Trustworthy Computing, is around three years old, but until recently the only public face of the strategy was the patches and technologies released to make existing Microsoft products more secure. But, late last year, the company began giving away personal firewall software as part of Windows XP Service Pack 2, a security-focused upgrade to XP. That alone could burn a big hole in Symantec’s revenue.
    Potentially even more damaging would be Microsoft’s entry into the anti-virus market. Microsoft lined itself up to do this in 2003 when it bought GeCad, a Romanian anti-virus software developer. One issue that may have been holding the company back from bundling this software with Windows is the risk of provoking yet more anti-trust legal action. The other is the complexity and scale of support needed for anti-virus products.
    Symantec can, however, see the inevitable coming. Its CEO John Thompson recently said: “I’m not totally sure what the packaging and pricing is going to be, but it is clear they [Microsoft] intend to enter the market at some point.”
    How quickly Symantec and Veritas are able to merge their product lines, sales forces and marketing strategies will play a key role in whether this merger succeeds or fails. There have been mixed reactions to the deal from analysts, but Symantec’s stock slide suggests that a great many shareholders are not convinced of the logic. They started abandoning the stock even before Symantec was able to explain its motivation for the deal and, while the Symantec-Veritas deal is being debated, EMC’s stock has continued to climb with more acquisitions tipped in coming months.

     

     

     

     

     


     

     

     


     

    O2 Combines Business and Pleasure

    O2 delivered a new white knight in shining armour today to Australians looking for a mobile device to save them from the boredom and demands of everyday life.

    The O2 Xphone IIm incorporates business tools and multimedia entertainment features in a neat ceramic-white finish complemented by smart silver-grey accents.
    “Australians are looking to do more with their mobiles and we are offering the O2 Xphone IIm as a solution,” said Oceanic VP of O2 John Featherstone. “The O2 Xphone IIm is a knight in shining armour to people who want to be rescued from the boredom of commuting or who are trapped in the demands of work. The O2 Xphone IIm lets you chill with its mini entertainment centre while you commute during peak hours or work away from the office with its smart productivity features.”

    The O2 Xphone IIm runs Windows Mobile 2003 as its operating system and offers wireless modem, GPRS and Bluetooth connectivity. Armed with Microsoft Pocket Outlook and MSN Messenger to keep users in touch, the phone’s entertainment and Internet features have been beefed up too.

    Windows Media Player 10 gets a dedicated softkey for easy access. But the neat thing about this phone is that you can set the device to download music, video or games from websites onto the on-board memory or to a miniSD card.

    The O2 Xphone IIm supports a wide range of music file formats, including WAV, WMA, MP3, AMR and AAC. In addition, the microphone and loudspeaker offer crystal clear sound to support vivid MPEG-4 video playback, and 32-chord polyphonic, MP3, WAV and WMA ring tones.

    The O2 Xphone IIm offers greater convenience with its dual-function keys which allow you to access a wide variety of the device’s functions in fewer clicks.  For example, a quick press of the “Home” button brings you back to your designated home page. A longer press of the same button turns off Bluetooth or GPRS.  Its streamlined architecture is further improved by O2 Menu, a simple and intuitive icon panel which makes most phone functions accessible within two clicks.

    Priced at $649 (RRP), the phone is distributed by Ingram Micro and Brightpoint Australia.

     

    Storage Blow For Sony

    Sony have been dealt a major blow following a decision by Microsoft and Intel to support Toshiba’s HD-DVD format over Blu-Ray.

    It is expected that Intel & Microsoft will announce that they are backing the HD-DVD format developed by Toshiba over the Blu-ray standard championed by Sony, Matsushita Electric, Samsung and others. Microsoft announced in June that it would work with Toshiba to develop high-definition DVD players. Now, Microsoft and Intel say they will develop software and chips that will allow personal computers to play the next-generation DVD’s from Toshiba.

    According to the New York Times the two companies said they had not ruled out incorporating Blu-ray technology in their operating systems and on their chips in the future. But they are convinced that as of now, the HD-DVD format discs can be produced more cheaply and more quickly than the Blu-ray discs, and are therefore likely to become the dominant technology. For the last two years, Microsoft and Intel have been careful not to alienate either camp in the format battle because they sell software and components to companies on each side. They also hoped that the electronics makers and Hollywood studios developing the formats would reach a compromise.

    But the major Hollywood studios are now split between the formats, and electronics companies on both sides plan to start selling next-generation DVD players as early as December 2005. Sony also plans to include Blu-ray technology in its new PlayStation 3 game console to be released next year.

    As the format standoff has deepened, demand for the current generation of DVD’s and DVD players has slowed, alarming Hollywood studios, which have come to depend heavily on disc sales. The studios, as well as electronics makers and computer manufacturers, expect high-definition discs to restart sales growth. But the lack of a resolution over the future format has slowed the changeover.  “We were neutral for a long time,” Jordi Ribas, the director of technical strategy for Windows at Microsoft, said. “But we’re approaching the time when this has to come to market and from our standpoint, the earlier the better.”

    As early as last year, however, some industry executives said that Microsoft was likely to side with the Toshiba camp.

    Though Microsoft and Intel do not make DVD machines, they benefit from the sale of next-generation discs because consumers will also want to play the new discs on their PC’s. That means that the computer operating system will have to be designed to read those discs.

    Microsoft and Intel say that Toshiba has proven that its discs can be copied onto hard drives and home servers and sent over home networks. The companies also favor the “hybrid” disc developed by Toshiba that includes a standard definition version of a movie on one side and a high-definition version on the other side.

    Their decision to support Toshiba’s HD-DVD format also creates another fissure in the tug of war between the companies backing the two formats.

    For instance, Dell and Hewlett-Packard, two of the world’s largest PC makers, are part of the Blu-ray group. Their computers, assuming they include Microsoft and Intel products, will be capable of playing HD-DVD discs. But if they want their machines to play Blu-ray discs, they may have to find a third-party to design software for them.

    In addition to developing software to play HD-DVD discs on PC’s, Microsoft may also create software so its new Xbox game console, which will be released later this year, will be able to play HD-DVD discs.

    In an exclusive interview with Tom’s Hardware Guide, one of Microsoft’s lead representatives on the DVD Forum Steering Committee said that decisions regarding whether his company and Intel would back and promote HD DVD as a high-definition video disc standard, were determined only within the last few days. Prior to some critical recent developments and announcements, both companies – which had proclaimed neutrality – may have been ready to back Blu-ray.

    “Until now, we viewed ourselves more as a technology provider for both groups,” said Jordi Ribas, Microsoft’s director of technology strategy for Windows Digital Media, and a key developer of the VC-1 codec currently in use by both HD DVD and Blu-ray. He revealed that Microsoft and Intel had produced a list of what he called “key requirements for the success of next-generation DVD.” For several months, while those requirements were being circulated, both companies worked on developing key standards to be implemented by both formats. Ribas said he was directly involved with implementing the VC-1 codec, and also worked jointly with Disney to produce the iHD interactive layer considered by both camps, but eventually adopted only by HD DVD (Disney is a member of the Blu-ray Disc Association.) During that time, Intel and Microsoft both maintained their public neutrality. But very recently, from the two companies’ perspective, things started unraveling unexpectedly for Blu-ray. hddvd2
    “Our decision is based mainly on where the formats are today,” Ribas said, referring to Microsoft. “A year and a half ago, both format organizations had very similar goals, and to some extent, the story of Blu-ray was actually very powerful. It had higher capacity, it had what we would consider benefits at the time. But then as time went on, and we’d seen what’s the reality of both formats today, and what were promises versus what’s proven and what’s real, that’s when we decided to make the decision.”

    Blu-ray failed the Intel/Microsoft test in six critical areas, Ribas told us, referring to a document listing those areas that a Microsoft spokesperson provided to Tom’s Hardware Guide:

    First, and perhaps foremost, is the ability for a consumer to make authorized copies of a legally obtained disc, in order to store the content on a hard drive and stream it to devices around the house. Intel particularly wants this capability for its Viiv home entertainment platform, announced last month. “We think it’s a great consumer win, and it’s a great industry win, to be able to ensure that with good copy protection, you can have so much functionality for the user,” Ribas told us. But when recently questioned about its support for these features, Ribas said, although Blu-ray had appeared supportive at one time, its current stance is now uncommitted.

    Support for hybrid discs that can be read in both current DVDs and future players, was the second critical element. This would “future-proof” new releases, enabling consumers to buy DVDs that can play in today’s players, while also providing high-def content for tomorrow’s. “That’s something that both promised,” said Ribas, “but HD DVD delivered, and Blu-ray has not – and it seems it’s nowhere in sight. [Blu-ray has] claimed they have it in the lab, but to go from the lab to mass production is like night and day. There’s a lot of effort that needs to happen. So as of now, there’s nothing that leads us to believe that that’s going to be possible [from Blu-ray] at this point.”

    Maintaining low production costs is a critical factor, which has been a key HD DVD talking point in light of current revelations about factory upgrade costs for Blu-ray. “For a long time, we actually thought that the Blu-ray Group had the upper hand in costs,” Ribas said, mainly because of the involvement in Blu-ray of most of the major Japanese CE manufacturers – Sony, Matsushita (Panasonic), Pioneer, and Sharp – as well as Philips. Here is where recent events played a critical role: In a development that was brought to light only this morning, two of the world’s leading China-based DVD player production facilities announced their support for HD DVD over Blu-ray. In press statements, these companies cited the relative openness of the DVD Forum compared to the Blu-ray Disc Association. “Now that we see China embracing HD DVD,” said Ribas, “we actually see that on the cost side, HD DVD will have an advantage, because the Chinese have been the ones who have lowered the prices, via the competition, for HD DVD players.” As much as 75 percent of DVD players sold in America today come from China, he added.

    Maintaining low disc replication costs affects the consumer price for media, said Ribas, which would play into any price/performance evaluation. A disc production factory can make minor upgrades to its equipment, he stated, with the result being equipment that can produce both conventional DVD as well as HD DVD. Citing figures circulating this week throughout the industry, Ribas said it would cost as much as $1.7 million per production line to install Blu-ray disc production equipment, and as much as $2.0 million for each new mastering system installed. That’s a significant expense, he explained, for a business which only turns over a 10 percent margin.

    The surprise entry in Microsoft’s and Intel’s list of failures is disc storage capacity. On paper, Blu-ray appears to have the advantage. But the two companies looked beneath the paper: Capacity, said Ribas, “used to be the biggest advantage of Blu-ray, and we believed it. We thought, they’ll get 50 GByte BD-ROM discs working, but it’s not happening, and it’s nowhere in sight. There are not even pilots. It’s only in the lab that they are building these discs.” With regard to demonstrated capacity, he told us, HD DVD-ROM actually leads BD-ROM by a score of 30 GByte to 25 GByte.

    The final entry is interactivity standards. Although Microsoft and Disney jointly developed the iHD interactivity layer, based on XML – which is the glue that holds together the “Vista vision” of Microsoft’s future Windows platform – and even though Disney is a Blu-ray proponent, the Association chose instead to endorse BDJ, an implementation of Sun’s Java Mobile Edition. Ribas told us that the major studios – either publicly or quietly – are opposed to BDJ, citing its relative complexity and its lack of compelling new features compared to iHD. An optional commentary track for videos, for example, that superimposes the speaker’s image on-screen as well as providing audio, is one key iHD feature that BDJ will support only as an option, maybe. “Which means nobody will use it,” said Ribas.

    “Intel was looking at similar issues,” said Ribas, “and [we] realized, ‘We are getting very close to getting these things into the market, we have to stop hoping or expecting or believing promises. We have to look at what’s real and what’s not.’ That’s where our decision came from.”

    Ribas told us more about his and his company’s expectations for the future of video disc technologies and interactive media in general.

     

    Intel Set To Launch New Chip Architecture

    Intel is set to launch several new initiatives at this week’s Intel Developers Forum being held in the USA.

    Intel is set to unveil the next generation architecture at Intel Developers’ Forum set to kick off in the USA today. Newly anointed CEO Paul Otellini is also set to reveal several other Intel initiatives including an outline of the company’s SmartHouse offerings.

    But the story of the week may very well become the triumph of Intel’s Israel Design Center (IDC), whose more moderate approach to processor architecture has won that team several architectural victories of late–not only over arch-rival AMD, but also over Intel’s own NetBurst architecture, which may very well follow the path Itanium has carved toward Intel’s back burner.

    “The rule of thumb in ‘NetBurst land,'” Nathan Brookwood, principal analyst with the Insight64 consultancy, told Tom’s Hardware Guide this afternoon, “was just throw clock frequency at the problem, and you’ll get more performance almost without thinking. And it turns out we’ve run into the end of that era. The Israelis saw that coming.”

    With all the recent innovations in multicore CPU packaging, microarchitecture–the design of processor engine components–has recently assumed a secondary role in public conversation. Lately, the talk has been about what Brookwood characterizes as, “How many cores can you fit on the head of a pin?” As a result, what’s happening inside each individual core hasn’t been a front-burner topic. So if you were to judge tomorrow’s likely IDF news from a multicore vantage point alone, you might overlook an upheaval going on beneath the core-level: The so-called NetBurst architecture which was the key feature of Pentium 4 when it was introduced in 2000, is being phased out.

    NetBurst had originally introduced Intel’s first 20-stage execution pipeline, and proceeded to grow the pipeline from there, having shipped a P4 with a 31-stage pipeline, according to Brookwood, and having cancelled a product that would have included a 40-stage pipeline. Longer pipelines were originally introduced, according to Intel, to enable greater pre-assessment and optimisation of machine code prior to execution.

    “A very long pipeline turns out to be extremely inefficient,” said Brookwood. “Therefore, although you felt good because you had a 3 GHz processor, in reality, it wasn’t delivering any more performance than a 2 GHz processor with shorter
    pipelines. But it used a lot more power and generated a lot more heat.”

    The Israeli team’s alternative was Pentium M, introduced in March 2003. As Brookwood confirmed, Intel conducted some convincing tests indicating Pentium M performance on a par with Pentium 4 in everyday, general-purpose applications– even though the P4 was expected to yield as much as four times the performance, and even though Pentium M units feature as small as 10-stage pipelines.

    “So from moving from the NetBurst core to a new core based on the Israeli techniques,” added Brookwood, “I think Intel will end up with a core that scales better with frequency.” With lower power consumption, you can put two or four of
    the new cores on a single chip, and still preserve what he called “reasonable thermal characteristics.” The new architecture will also mark the first time that desktop and server CPU architectures were derived from a mobile platform. As Brookwood reminded us, the Pentium III architecture was modified once to create the “Mobile” edition, and
    then modified a second time to create the first Xeon processors. But the Israeli design team was first commissioned five years ago to develop a mobile processor architecture that could meet what were then considered the extreme thermal conditions of notebook and laptop systems. The solution to the mobile thermal problem became the solution to the desktop and server thermal problem a few years later. “This represents the triumph of the power-efficient design methodologies that came out of Israel,” said Brookwood, “moving into Intel’s mainstream desktop, and server lines, as well as next-generation mobile processors.”

    Tomorrow’s announcements are expected to indicate that the so-called Merom processor architecture–first code-named in 2004–will serve as the basis for the Conroe desktop CPU architecture and the Woodcrest server CPU architecture.

    Oftentimes, smart companies publish bad news on the heels of an otherwise good-news day. So if rumors put forth in the Inquirer this afternoon are correct that HP plans to cancel its planned orders for Itanium-based systems–in the wake of HP’s already having cancelled its collaboration with Intel on Itanium’s design–then this news could conceivably come during IDF.

    While unable to confirm such rumors himself, Insight 64’s Nathan Brookwood speculated, “If HP were to turn down Montecito…that would, I think, cause a great deal of reassessment in almost all parts of the industry that touch
    Itanium.”

    Other announcements expected no later than Wednesday include whether Intel has stepped up its plans to proceed toward 45 nm lithography–thus bending the curve of Moore’s Law up just slightly–as well as a new, lower-wattage dual-core Xeon
    processor.

    Tom’s Hardware Guide from whom SOR takes content has a team in San Francisco to cover the events of the Intel Developers’ Forum as they happen. Stay in touch with us for breaking news all this week, including your first look at the new Pentiums.

     

    See www.tomshardware.com

     

     

    EMC Steals Microsoft’s Thunder

    In what looks like an attempt to spike Microsoft’s publicity guns, EMC has unveiled a CDP-like product that is an update to its existing RepliStor software.

    The SMB mid-range Windows server-hosted software is not a true continuous data protection product. EMC said it still plans to launch true high-end CDP software soon.

    By introducing the ability to trigger snapshots of file systems in RepliStor version 6.1, EMC has created a product that is comparable to Microsoft’s first ever standalone backup product, its Data Protection Manager software. Usually EMC does not announce products until they are shipping or are very close to shipping.

    But there is no date set for the RepliStor update, and EMC said only that it will ship before the year end. Microsoft is expected to make the official launch of DPM soon, and that may have inspired EMC’s decision to unveil and detail a product unusually far ahead of its GA date.

    Just like DPM, RepliStor 6.1 does not qualify as a true continuous data protection product because the Windows server-based software does not capture every change made to data. Instead it simply stores a series of snapshots of file systems, as does DPM.

    On a technical basis, that separates RepliStor and DPM from true CDP products that are also aimed at backing up file level data but capture every change made to data. These products include IBM Corp’s recently launched Tivoli CDP for Files and Symantec Corp’s imminent Panther product. But RepliStor is also different from those products and from DPM in its support for Exchange email databases, and its lack of a self-service file recovery facility.

    While DPM and the Tivoli products cannot yet back up Exchange databases, RepliStor 6.1 can. Score one for RepliStor’s ability to protect data for Exchange 2003, a key and almost ubiquitous application.

    On the other hand, RepliStor does not provide the major advantage of a self-service web portal that allows end-users to recover lost files, speeding file recovery and eliminating what can be a major workload for IT administrators. The IBM and Symantec products both offer this service.

    “We don’t have that at the moment. But at least the IT people themselves have access to the [RepliStor generated] backup data,” said EMC’s director of product marketing Rob Emsley.

    RepliStor was first launched by Legato around four years ago, as Windows server-based asynchronous replication software, which mirrors file data from remote offices or departments to central locations in many-to-one configurations. Evaluator Group analyst Dennis Martin said that RepliStor began life in the mid-nineties as a product called Octopus. It was later acquired by Legato, which was itself bought by EMC in 2003.

    “This is a market that’s really heating up, at least on the vendor side. There are so many suppliers trying to crack the nut. Backup has always been a problem in IT,” Mr Martin said.

    RepliStor 6.1 will be able to store up to 64 file system snapshots. It will list from around $2,000 per server. The software will trigger snapshots via the Windows VSS interface, using either the Windows snapshot engine, or disk array-based snapshot tools. Using the latter will improve performance, EMC said.

    Storage Wars As Western Digital Battles Seagate

    As several vendors spruik cloud deals a backup storage war has broken out in Australia with two of the biggest players Seagate and Western Digital going head to head with smart new storage products.

    The battle intensified yesterday as major rivals Seagate and Western Digital both announced new consumer products that offer easy backups linked to online services.

    Seagate introduced its new BackUp Plus line of consumer portable drives at a media function staged – for reasons not immediately apparent – in a Sydney tenpin bowling and Laser Skirmish shoot-em-up alley.

    In what B. S. Teh, MD for Asia-Pac and Japan, claimed is an industry first, the drives back up from personal computers with consummate ease – and also save, share and back-up photos and other content on social networking sites like Facebook and Flickr. They can be switched between Macs and Windows PCs without reformatting, Seagate stresses.

    The company’s Dashboard software is said to enable one-click local backups as well as a simple way to save and share photos and videos on the social media sites.

    The portable 2.5 inch drives come in red, blue, silver and black, with prices starting at A$139 for a 500GB unit. There’s also a super-svelte unit called Slim, just 9.5mm thick, for $159; and a 3.5-inch desk-based drive (in basic black) starting at $179 for one terabyte – and with additional modes offering up to 4TB.

    Meanwhile, rival Western Digital announced its new “Personal Cloud” line of “MyBook” drives, said to offer integration with cloud storage service Dropbox, using the company’s WD 2go software. “The new version of WD 2go lets customers move, copy and share content between their mobile device, their Dropbox and their WD personal cloud,” according to an e-mailed press release.