Varnish config for WordPress with ngx_pagespeed and WPtouch

This is the Varnish config I am currently using. It works with WPtouch, PageSpeed and WordPress, and (bonus) works around PageSpeed not allowing pages to cache. No time for pretty comments and explanations, so here’s the code. I will answer questions, or come back and explain the code in comments, but it is fairly self-explanatory.

backend default {
    .host = "127.0.0.1";
    .port = "80";
    .first_byte_timeout = 300s;
}

sub generate_user_agent_based_key {
    set req.http.default_ps_capability_list_for_large_screens = "LargeScreen.SkipUADependentOptimizations:";
    set req.http.default_ps_capability_list_for_small_screens = "TinyScreen.SkipUADependentOptimizations:";

    set req.http.PS-CapabilityList = req.http.default_ps_capability_list_for_large_screens;

    # Lazyload
    if (req.http.User-Agent ~ "(?i)Chrome/|Firefox/|MSIE |Safari") {
        set req.http.PS-CapabilityList = "ll,ii,dj:";
    }
    # lazyload_images (ll), inline_images (ii), defer_javascript (dj), webp (jw) and lossless_webp (ws).
    if (req.http.User-Agent ~
        "(?i)Chrome/[2][3-9]+\.|Chrome/[3-9][0-9]+\.|Chrome/[0-9]{3,}\.") {
        set req.http.PS-CapabilityList = "ll,ii,dj,jw,ws:";
    }
    # odd ones
    if (req.http.User-Agent ~ "(?i)Firefox/[1-2]\.|MSIE [5-8]\.|bot|Yahoo!|Ruby|RPT-HTTPClient|(Google \(\+https\:\/\/developers\.google\.com\/\+\/web\/snippet\/\))|Android|iPad|TouchPad|Silk-Accelerated|Kindle Fire") {
        set req.http.PS-CapabilityList = req.http.default_ps_capability_list_for_large_screens;
    }
    # mobile
    if (req.http.User-Agent ~ "(?i)Mozilla.*Android.*Mobile*|iPhone|BlackBerry|Opera Mobi|Opera Mini|SymbianOS|UP.Browser|J-PHONE|Profile/MIDP|portalmmm|DoCoMo|Obigo|Galaxy Nexus|GT-I9300|GT-N7100|HTC One|Nexus [4|7|S]|Xoom|XT907") {
        set req.http.PS-CapabilityList = req.http.default_ps_capability_list_for_small_screens;
    }
    # Remove placeholder header values.
    remove req.http.default_ps_capability_list_for_large_screens;
    remove req.http.default_ps_capability_list_for_small_screens;
}

sub vcl_hash {
    # Block 3: Use the PS-CapabilityList value for computing the hash.
    hash_data(req.http.PS-CapabilityList);
}

# Block 3a: Define ACL for purge requests
acl purge {
    # Purge requests are only allowed from localhost.
    "localhost";
    "127.0.0.1";
    # Add your server IP to this list
}

# Block 3b: Issue purge when there is a cache hit for the purge request.
sub vcl_hit {
    if (req.request == "PURGE") {
        purge;
        error 200 "Purged.";
    }
}

# Block 3c: Issue a no-op purge when there is a cache miss for the purge
# request.
sub vcl_miss {
    if (req.request == "PURGE") {
        purge;
        error 200 "Purged.";
    }
}

sub vcl_recv {
    call generate_user_agent_based_key;

    set req.http.X-Forwarded-For = client.ip;
    set req.http.Host = regsub(req.http.Host, ":[0-9]+", "");

    # Block 3d: Verify the ACL for an incoming purge request and handle it.
    if (req.request == "PURGE") {
        if (!client.ip ~ purge) {
            error 405 "Not allowed.";
        }
        return (lookup);
    }
    # Blocks which decide whether cache should be bypassed or not go here.

    # Do not cache the admin and login pages
    if (req.url ~ "/wp-(login|admin)") {
        return (pass);
    }

    # The backend must handle file uploads itself
    if (req.url ~ "media-upload.php" || req.url ~ "file.php" || req.url ~ "async-upload.php") {
        return (pass);
    }

    # Do not cache xmlrpc.php; strip cookies from GET requests to it first
    if (req.url ~ "xmlrpc.php") {
        if (req.request == "GET") {
            remove req.http.cookie;
        }
        return (pass);
    }

    # Remove the "has_js" cookie
    set req.http.Cookie = regsuball(req.http.Cookie, "has_js=[^;]+(; )?", "");

    # Remove any Google Analytics based cookies
    set req.http.Cookie = regsuball(req.http.Cookie, "__utm.=[^;]+(; )?", "");

    # Remove the Quant Capital cookies (added by some plugin, all __qca)
    set req.http.Cookie = regsuball(req.http.Cookie, "__qc.=[^;]+(; )?", "");

    # Remove the wp-settings-1 cookie
    set req.http.Cookie = regsuball(req.http.Cookie, "wp-settings-1=[^;]+(; )?", "");

    # Remove the wp-settings-time-1 cookie
    set req.http.Cookie = regsuball(req.http.Cookie, "wp-settings-time-1=[^;]+(; )?", "");

    # Remove the wp test cookie
    set req.http.Cookie = regsuball(req.http.Cookie, "wordpress_test_cookie=[^;]+(; )?", "");

    # Are there cookies left with only spaces or that are empty?
    if (req.http.cookie ~ "^ *$") {
        unset req.http.cookie;
    }

    if (req.http.Accept-Encoding) {
        # Do not compress already-compressed files
        if (req.url ~ "\.(jpg|png|gif|gz|tgz|bz2|tbz|mp3|ogg)$") {
            remove req.http.Accept-Encoding;
        } elsif (req.http.Accept-Encoding ~ "gzip") {
            set req.http.Accept-Encoding = "gzip";
        } elsif (req.http.Accept-Encoding ~ "deflate") {
            set req.http.Accept-Encoding = "deflate";
        } else {
            remove req.http.Accept-Encoding;
        }
    }

    # Cache the following file extensions
    if (req.url ~ "\.(css|js|png|gif|jp(e)?g)") {
        unset req.http.cookie;
    }

    # Check the cookies for WordPress-specific items
    if (req.http.Cookie ~ "wordpress_" || req.http.Cookie ~ "comment_") {
        return (pass);
    }
    if (!req.http.cookie) {
        unset req.http.cookie;
    }

    # -- End of WordPress-specific configuration

    # Do not cache requests with HTTP authentication or cookies
    if (req.http.Authorization || req.http.Cookie) {
        # Not cacheable by default
        return (pass);
    }

    # Block 5b: Only cache responses to clients that support gzip. Most clients
    # do, and the cache holds much more if it stores gzipped responses.
    if (req.http.Accept-Encoding !~ "gzip") {
        return (pass);
    }

    # Cache all other requests
    return (lookup);
}

# Block 6: Strip backend cookies from static assets and set the cache TTL.
sub vcl_fetch {
    # For static content related to the theme, strip all backend cookies
    if (req.url ~ "\.(css|js|png|gif|jp(e?)g)") {
        unset beresp.http.set-cookie;
    }

    # A TTL of 30 minutes
    set beresp.ttl = 1800s;

    return (deliver);
}
# Block 7: Add a header for identifying cache hits/misses.
sub vcl_deliver {
    if (obj.hits > 0) {
        set resp.http.X-Cache = "HIT";
    } else {
        set resp.http.X-Cache = "MISS";
    }
}
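
To apply it, the config goes in /etc/varnish/default.vcl (the syntax above is Varnish 3.x) and Varnish needs a restart. A quick sanity check, assuming the standard Ubuntu package layout: varnishd -C compiles the VCL and fails loudly if there is a syntax error, so you can chain it with the restart.

varnishd -C -f /etc/varnish/default.vcl > /dev/null && sudo service varnish restart

After that, requesting any page twice with curl -I should show the X-Cache header from Block 7 go from MISS to HIT.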

Ubuntu 13.10 internet very slow “nothing helps” fix

I installed Ubuntu 13.10 on my laptop and went nuts with how laggy it was. It has 2 GB of memory, which shouldn’t be causing such a comatose experience. I installed drivers, tweaked memory, did a hundred things; nothing helped.

Digging around in the innards, I found that /etc/resolv.conf was very strange: it was showing localhost as the name server. This couldn’t be right. I also found that any attempt to put working DNS servers there was getting rewritten.

In the end, I found a strange fix. The Network Manager configuration (sudo gedit /etc/NetworkManager/NetworkManager.conf) was using DNS from dnsmasq. Guessing (rightly, as it turns out) that I didn’t need DNS served from my own computer (and I have no idea how it would sync it), I commented out that line and restarted Network Manager. The line looks like this:

<code>dns=dnsmasq</code>

Commented it out like so

<code>#dns=dnsmasq</code>
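
Then restart Network Manager so the change takes effect; on Ubuntu the service is called network-manager:

<code>sudo service network-manager restart</code>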

Now /etc/resolv.conf shows the DNS servers it gets from the internet provider.

I have no idea if this is the “right answer”, but if your computer is slow and freezes when using the internet, and /etc/resolv.conf shows 127.0.1.1 or 127.0.0.1 or some such as your DNS server instead of proper DNS server IPs, it is worth a shot. You can always uncomment the line if it doesn’t help.

My computer is running faster, freezing less and hasn’t yet exploded.

Ioncube with Nginx+php-fpm giving 502 gateway error SOLVED

Ubuntu 13.10 seems to be having trouble with ionCube and php-fpm. My earlier guide on loading ionCube may no longer work for you.

This is really strange, and I have no idea why no one seems to mention it, but if you are getting frustrated trying to install the ionCube loader on php-fpm, just ignore the instructions to create the 20-ioncube.ini file and put the line directly at the end of your php.ini.

Steps to install the ionCube loader with php5-fpm

cd /usr/local
sudo wget http://downloads2.ioncube.com/loader_downloads/ioncube_loaders_lin_x86-64.tar.gz
sudo tar xzf ioncube_loaders_lin_x86-64.tar.gz
sudo mv /usr/local/ioncube/* /usr/lib/php5/20121212/

This part is the same as before.

Now, instead of creating a file called 20-ioncube.ini or ioncube.ini, add the directive directly to your php.ini file. (On Ubuntu with the repository php5-fpm package, php.ini is found at /etc/php5/fpm/php.ini.)

At the very end add:

zend_extension = /usr/lib/php5/20121212/ioncube_loader_lin_5.5.so

Then restart php-fpm

service php5-fpm restart
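
To confirm the loader is active for FPM (php5-fpm reads /etc/php5/fpm/php.ini, not the CLI php.ini), check its version banner; with the loader in place it should mention something like “with the ionCube PHP Loader”:

php5-fpm -v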

If it still doesn’t work, try doing the same thing as root.

If you can’t find your php.ini, create a php file on your website with some random name. Open it in an editor and add the line:

<?php phpinfo(); ?>

Access the file on your site with a browser. It will show all kinds of info about PHP, including the locations of the configuration files (php.ini and others). Delete the file when you are done, since it exposes server details.

Ubuntu network slow RTL8101E/RTL8102E PCI Realtek

I recently reinstalled Ubuntu, and found that my network was agonizingly slow. Installing the driver from the Realtek website fixed this. My card is the RTL8101E/RTL8102E PCI Express Fast Ethernet controller, but I imagine this will work for other versions too.

The problem is that the default r8169 driver does not support this card well. Blacklist it:

sudo gedit /etc/modprobe.d/blacklist-network.conf

and add

blacklist r8169

to it. (modprobe only reads files in /etc/modprobe.d that end in .conf, and the blacklist directive takes the module name, not the .ko filename.)

Download the driver from the Realtek website.

Extract it. Compile it by going to the folder where you extracted it (Downloads, for example) as root (your prompt will be something like root@vidyut-Compaq-435-Notebook-PC:~/Downloads/r8101-1.025.00#) and running:

make

and

make install

The make install didn’t work for me, so I had to copy the module into the kernel’s drivers directory manually.

cp src/r8101.ko /lib/modules/3.11.0-12-generic/kernel/drivers/net/ethernet/realtek/
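
If your kernel version differs from 3.11.0-12-generic, substitute your own; uname -r prints the running kernel’s version, so this variant works generically:

cp src/r8101.ko /lib/modules/$(uname -r)/kernel/drivers/net/ethernet/realtek/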

Then run:

depmod -a
modprobe r8101
service network-manager restart

That should do it. If not, try:

ifconfig eth0 down
ifconfig eth0 up
service network-manager restart

Your network should be working normally now.

WordPress with MariaDB instead of MySQL

So I heard good things about MariaDB and decided to switch from MySQL to MariaDB. MariaDB is a fork of MySQL developed by MySQL’s original developers, and it is intended to be a drop-in replacement, meaning all your commands and databases from MySQL should continue to work seamlessly after the switch.

A tall claim. With years of relationships with webservers behind me, I know that even an upgrade can break things, and here the database management software itself was being replaced, however seamlessly. Only a complete novice would believe “as advertised” to the point of not being worried.

My biggest fear was breaking my blogs. Backups were there, but it is unpleasant to see your precious sites not working, and I was apprehensive.

So anyway, I did it.

I added the repository. (These are my instructions, but the MariaDB site helpfully provides a configurator that generates repository instructions customized for your operating system, its version and the MariaDB version, which you should totally use.)

sudo apt-get install software-properties-common
sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xcbcb082a1bb943db
sudo add-apt-repository 'deb http://mirrors.hustunique.com/mariadb/repo/5.5/ubuntu saucy main'

I’d done paranoid backups to the nth degree before, as you should too, but I won’t bore you with the details. Suffice it to say that I had 3 of each database AND a snapshot of my VPS to restore with “one click” if I got itchy AND I copied the mysql directory anyway (I really love my blogs. Really). I think this was mostly of therapeutic value after the first backup, but hey, it was good for my blood pressure.

Updated and installed MariaDB.

sudo apt-get update
sudo apt-get install mariadb-server
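
A quick way to confirm the switch took: the client’s version banner should now mention MariaDB (something like “Distrib 5.5.x-MariaDB”):

mysql --version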

The only pain here was that the repository mirror I used was agonizingly slow to download from, which really did not help my anxiety levels, since I’m used to the blazing fast Ubuntu repositories. Or perhaps I hit a temporary patch of bad network.

Regardless, if you are superstitious, you may want to avoid this one.

After a wait that almost had me too old to care, the installation was done.

That is it. There was no noticeable difference on my site, except that it seemed slightly faster. The configuration file got replaced, but the defaults are good enough that the blogs are completely normal. I expect performance may improve further once I get around to tweaking it, but this is good already.

The backups did not get used. A textbook “drop-in”. Zero hassle.

Do it already. The only cure for your wondering is finding out.

Meteor::Socket bind: Address already in use at Meteor/Socket.pm line 115.

If, after installing meteor server, you get an error like

Meteor::Socket bind: Address already in use at Meteor/Socket.pm line 115.

when trying to start it up with ./meteord -d or /etc/init.d/meteord start, it most likely means you have another instance of Meteor already running.
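
You can confirm by looking for the running process (the brackets keep grep from matching itself):

ps aux | grep '[m]eteord'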

Unless you have changed ports around, you can kill the existing instance of Meteor with pkill meteor, or simply use the running one without starting a new instance ;)

Installing Meteor Server on Ubuntu

Meteor is a javascript server that does interesting things like live-updating your live blog with far less load on your server than you would otherwise have. However, setting it up is iffy, and the instructions are not idiot-proof. So here are the steps, distilled from my hits and misses, so that you don’t have to go through that yourself.

Meteor Server Installation instructions

These instructions follow those provided on the Meteor Server website, along with my comments.

Make a directory for the Meteor Server and cd into it.
mkdir /usr/local/meteor
cd /usr/local/meteor

We begin with getting and unpacking Meteor Server:
wget http://meteorserver.org/download/latest.tgz
tar zxvf latest.tgz
rm latest.tgz

Alas, this doesn’t work. There is no file at the provided URL, and I had to use the URL given for the download link instead. So this should work:

wget https://github.com/visitsb/meteorserver/blob/master/build/meteor-latest.tgz?raw=true
At this point, check the name of the file you got.
ls
if it is “meteor-latest.tgz?raw=true”, then
mv meteor-latest.tgz?raw=true meteor-latest.tgz
before proceeding, or the next step won’t work. Now
ls
should give you “meteor-latest.tgz”. Ready to move on.
tar zxvf meteor-latest.tgz
rm meteor-latest.tgz
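
Alternatively, wget’s -O flag saves the download under the right name in one step, skipping the rename:

wget -O meteor-latest.tgz 'https://github.com/visitsb/meteorserver/blob/master/build/meteor-latest.tgz?raw=true'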

Now to set it up.

Copy the init script to /etc/init.d/meteord:

cp daemoncontroller.dist /etc/init.d/meteord

You will need to edit the file to change the path if you did not install Meteor in /usr/local/meteor. If you wish to use this script to start/stop Meteor, you will need to edit line 14 to specify which user account will be used to run it. The default is meteor, so if you want to create that user account now, type:

useradd meteor

Now copy the configuration file to /etc/meteord.conf:

cp meteord.conf.dist /etc/meteord.conf

To start meteor at boot, they recommend

chkconfig meteord on

This part didn't work for me, as I don't have chkconfig installed; the instructions seem "Fedora-ish", and I have no idea how Fedora works. Never used it. Instead, I did:

update-rc.d meteord defaults
update-rc.d meteord enable

At this stage, you should be able to start meteor in debug mode (according to them).

./meteord -d

For me, it didn't. I needed to

chmod +x meteord

as they have suggested. I also did

chmod +x /etc/init.d/meteord

I could start meteor in debug mode successfully, but

/etc/init.d/meteord start

wouldn't work.

I was getting "/bin/sh^M: bad interpreter: No such file or directory"

I found two problems. The first was that the /etc/init.d/meteord script sources /etc/init.d/functions, which didn't exist on my system. By checking which file the init scripts that did work were sourcing, I found the fix: edit the file to change the line

. /etc/init.d/functions

to

. /lib/lsb/init-functions

About the "^M" in the error: I discovered it was caused by the file having DOS line endings when it should have had Unix line endings.

I opened it in vi

vi /etc/init.d/meteord

and in the command mode itself (hit ESC if you've switched to INSERT) entered:

:set fileformat=unix

Then saved and exited

:wq
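
If you prefer a one-liner to the vi route, stripping the carriage returns with sed does the same thing (dos2unix does too, if it is installed):

sed -i 's/\r$//' /etc/init.d/meteord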

Now

/etc/init.d/meteord start

starts Meteor.

:)

I will do a separate post on using Meteor to power Live Blogging Plus (when I finish doing it).


Setting up live blogging that works with the latest version of WordPress

True to form, I want big things for my blog, but they aren’t easy. This time the idea came from a reader who wanted my commentary tweet series on current events done as a live blog instead, so that they could read all the ideas at once in one place, where they would also remain easily accessible.

Great idea, except that all the plugins I came across for live blogging were outdated and not working correctly with the latest WordPress, with the exception of 24liveblog, which I was not so keen on, as the content resides not on my server but on theirs (though it is free and they promise never to take it down).

If you are fine with the content being hosted on another service, look no further: 24liveblog is good and free, and the resulting live blog can be embedded on your site with a code snippet they provide.

I wanted to make the plugins work, as I wanted the content to reside on my server AND I wanted it to post tweets linking to the post with each update.

So I have updated the Live Blogging plugin to fix several issues with tweets not getting posted to Twitter. You can download Live Blogging Plus here.

The plugin can deliver the live blog more efficiently using Meteor, so I am planning to set up a Meteor server for it to use. Stay tuned.


Upload Error: client intended to send too large body

If you are using Nginx and are unable to upload files exceeding 1MB or so (the most common default limit), and your error log shows “client intended to send too large body”, then here is the fix.

Edit your Nginx configuration file (which on Debian/Ubuntu will be found at /etc/nginx/nginx.conf) and change the setting for client_max_body_size to something you can live with. If there is no line for it, add this line:

client_max_body_size 5M;

Obviously, replace 5M (for megabytes) with a number that makes you happy if your uploads are larger.
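
For context, client_max_body_size can sit in the http, server or location block; in the http block of nginx.conf it applies to everything. A minimal sketch, followed by the reload that makes Nginx pick the change up:

http {
    # allow request bodies (uploads) up to 5 MB
    client_max_body_size 5M;
}

sudo service nginx reload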
