Compare commits

142 Commits

SHA1:
f3598c3c97, 4fd73ac512, 747f956d01, 1519d2c64c, 43f4f80836, ca7726c441, c78eb5228a, 81053c0868,
b1367d237f, 603207cc2b, 43e2b3e403, f69422e82f, 96e82d4115, 6d4a2c4cc4, f8e5f73e18, cdc2811d7b,
32d26c3e10, b482d4d901, ad00284a22, a835c9e709, 597f922db7, cdb456a9b5, 714902d12a, 657d8535a6,
0594681760, f5478cfef5, e10217c64d, f91def64d7, f991123545, 86ebbfbdd3, a35287b367, ddabb4620e,
b86211d2d8, 0b414813aa, 7dbe65e5f8, ef38dd1532, 4b91fb1212, 80a60d58cf, 1257f8830a, 8ed8bfdc2a,
042eb9e5e8, 15cac480fe, 02e867c17d, d51cd31ebc, ea354cf035, 0d4bacecd8, 86197c9b62, f8fd92fb0a,
61b038e0d9, 5b60b7ffb8, 6e1911bdd5, d376074936, 64b985f005, cc50339bcb, 8820501644, 07021b01f4,
13891a52dd, a1bbe23800, 2b5fdc00ee, 68ff573904, 3a62814fdc, da043ee0c4, 5cd6ea41f9, d321be888c,
7230b11d3b, 7b9ec17d8d, 18b38db037, adfb468913, 43f4211abb, b1b8585518, d8fd1a5de0, 585baf0668,
f99eff3a99, ff4b913396, e861348475, b0831240b1, 37dee56bb7, 2b11fc2299, 754ca4cd4f, 10e3dc16d2,
e23c6dae90, 96c1d7c59c, c574943e16, 3226206267, 63543b890a, b58f859eff, 170a1efeb1, 50e3d1667f,
269732d8ea, a11206d4ba, cb78a6c3d6, 662280c139, 68dd2fd019, c193c5d03b, 4efb5b43c6, 3d2eb81cd5,
a8d7937889, 98c9fdff24, f05512fd1d, fc460f9497, 1fd55d98e1, 4f5746f2a7, 3d90a806fa, cf9abcd5b8,
e1036185ae, d1af24736b, 236665d7d3, c340d851d3, 38545cfffd, bf19ecffcb, 6fa6d02360, e588f261f8,
5d5545ec9e, d152050e29, a0da2c0ea0, 2e86243d88, 6b61c7d454, 3881a5d66d, 69b223a370, 894cb6ea27,
b81e3e8f15, 7d49f9b820, 9dad1586af, d60178f598, 5d74e10b08, 40422ab4b0, b35a23d89e, 3445b5b1b6,
8315a3872d, 6abea8746c, 4973dd93be, d31ccbd268, 2bbe39cf4a, 2219177eac, 5c0933d19a, 8de9eca8ae,
d172180660, 9f78b17fd1, f5e33fb396, c88e690bb4, c1de398acc, 73f7c88122
@@ -9,4 +9,4 @@ charset = utf-8

[{Dockerfile,docker-compose.yml}]
indent_style = space
indent_size = 4
indent_size = 2

1 .gitignore vendored

@@ -2,3 +2,4 @@
/logs
/data
.env
/.project

@@ -42,7 +42,7 @@ Laradock uses [Hugo](https://gohugo.io/) as website generator tool, with the [Ma

1. Install [Hugo](https://gohugo.io/) on your machine (easy thing).
2. Open the `DOCUMENTATION/_settings/content` and search for the markdown file you want to edit (every folder represents a section in the menu).
3. Delete the `/docs` folder from the root.
4. When you finish editing, run the `hugo` command to generate the HTML docs (in the `/docs`).
4. When you finish editing, go to `DOCUMENTATION/_settings/` and run the `hugo` command to generate the HTML docs (inside new `/docs` folder).

### To Host the website locally

Go to `DOCUMENTATION/_settings` in your terminal and run `hugo serve` to host the website locally.
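Taken together, the edited steps describe roughly this flow; a minimal sketch only (it assumes Hugo is installed and you start from the repository root, as the steps above imply):

```bash
# Edit the markdown under DOCUMENTATION/_settings/content first.
rm -rf docs                  # step 3: delete the generated /docs folder from the root
cd DOCUMENTATION/_settings
hugo serve                   # optional: preview the website locally while editing
hugo                         # step 4: regenerate the HTML docs into the new /docs folder
```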
@@ -706,6 +706,23 @@ docker-compose up -d mariadb phpmyadmin

<br>
<a name="Use-Adminer"></a>
## Use Adminer

1 - Run the Adminer Container (`adminer`) with the `docker-compose up` command. Example:

```bash
docker-compose up -d adminer
```

2 - Open your browser and visit the localhost on port **8080**: `http://localhost:8080`
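To have a database to connect to from the Adminer UI, you would normally start one alongside it; a small sketch (it assumes `ADM_PORT` is set to 8080 in your `.env`, matching the port mentioned above, and that the two containers share a Compose network so the service name can be used as the database host):

```bash
# Start MySQL together with Adminer.
docker-compose up -d mysql adminer

# Adminer is published on ${ADM_PORT} (assumed 8080 here): http://localhost:8080
# In the login form, the database host is typically the Compose service name, e.g. "mysql",
# provided both containers are attached to the same network.
```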
<br>
<a name="Use-pgAdmin"></a>
## Use PgAdmin

@@ -1332,15 +1349,36 @@ Moving from Docker Toolbox (VirtualBox) to Docker Native (for Mac/Windows). Requ

<br>
<a name="Speed-MacOS"></a>
## Improve speed on MacOS

Sharing code into Docker containers with osxfs have very poor performance compared to Linux. You can get around this issue by using NFS to share your files betwen your host and your container.
Sharing code into Docker containers with osxfs have very poor performance compared to Linux. Likely there are some workarounds:

> How to share files using NFS (d4m-nfs)
### Workaround A: using dinghy

[d4m-nfs](https://github.com/IFSight/d4m-nfs) automatically mount NFS volume instead of osxfs one.
[Dinghy](https://github.com/codekitchen/dinghy) creates its own VM using docker-machine, it will not modify your existing docker-machine VMs.

Quick Setup giude, (we recommend you check their docs)

1) `brew tap codekitchen/dinghy`

2) `brew install dinghy`

3) `dinghy create --provider virtualbox` (must have virtualbox installed, but they support other providers if you prefer)

4) after the above command is done it will display some env variables, copy them to the bash profile or zsh or.. (this will instruct docker to use the server running inside the VM)

5) `docker-compose up ...`
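The five dinghy steps above, collected into one runnable sequence; a sketch only (it assumes Homebrew and VirtualBox are already installed, and the exact variables printed in step 4 depend on your machine):

```bash
brew tap codekitchen/dinghy
brew install dinghy

# Create the VM (other providers than virtualbox are supported too).
dinghy create --provider virtualbox

# dinghy prints a handful of DOCKER_* environment variables when it finishes;
# copy them into your ~/.bash_profile or ~/.zshrc so the docker CLI talks to the VM.

# Then start your containers as usual, e.g.:
docker-compose up -d nginx mysql
```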
### Workaround B: using d4m-nfs

[D4m-nfs](https://github.com/IFSight/d4m-nfs) automatically mount NFS volume instead of osxfs one.

1) Update the Docker [File Sharing] preferences:

@@ -1358,14 +1396,12 @@ git clone https://github.com/IFSight/d4m-nfs ~/d4m-nfs

```txt
/Users:/Users
/Volumes:/Volumes
/private:/private
```

5) Create (or edit) the file `/etc/exports`, make sure it exists and is empty. (There may be collisions if you come from Vagrant or if you already executed the `d4m-nfs.sh` script before).

6) Run the `d4m-nfs.sh` script:
6) Run the `d4m-nfs.sh` script (might need Sudo):

```bash
~/d4m-nfs/d4m-nfs.sh

@@ -1374,10 +1410,26 @@ git clone https://github.com/IFSight/d4m-nfs ~/d4m-nfs

That's it! Run your containers.. Example:

```bash
docker-compose up -d nginx mysql
docker-compose up ...
```

**Note:** If you faced any errors, try restarting Docker, and make sure you have no spaces in the `d4m-nfs-mounts.txt` file, and your `/etc/exports` file is clear.
*Note: If you faced any errors, try restarting Docker, and make sure you have no spaces in the `d4m-nfs-mounts.txt` file, and your `/etc/exports` file is clear.*
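The d4m-nfs route, condensed into one sketch (the clone target, the mounts file path and its contents come from the steps above; emptying `/etc/exports` and the use of sudo are assumptions about a typical run):

```bash
# Get d4m-nfs (as shown in the hunk header above).
git clone https://github.com/IFSight/d4m-nfs ~/d4m-nfs

# Tell d4m-nfs what to export over NFS instead of osxfs.
cat > ~/d4m-nfs/etc/d4m-nfs-mounts.txt <<'EOF'
/Users:/Users
/Volumes:/Volumes
/private:/private
EOF

# Make sure /etc/exports exists and is empty (avoids collisions with Vagrant or earlier runs).
sudo sh -c '> /etc/exports'

# Run the script (might need sudo), then start the containers you need.
sudo ~/d4m-nfs/d4m-nfs.sh
docker-compose up -d nginx mysql
```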
### Other good workarounds:

- [docker-sync](https://github.com/EugenMayer/docker-sync)
- Add more here..

More details about this issue [here](https://github.com/docker/for-mac/issues/77).

@@ -43,7 +43,7 @@ git submodule add https://github.com/Laradock/laradock.git

- If you are not using Git yet for your project, you can use `git clone` instead of `git submodule `.

- Note 2: To keep track of your Laradock changes, between your projects and also keep Laradock updated. [Check this](#keep-tracking-Laradock)
- Note 2: To keep track of your Laradock changes, between your projects and also keep Laradock updated. [Check this](/documentation/#keep-track-of-your-laradock-changes)
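For reference, the two ways of pulling Laradock into a project that the note above contrasts; a sketch (the project directory name is hypothetical, both commands appear in the text):

```bash
# Inside an existing Git project: track Laradock as a submodule.
cd my-project        # hypothetical project directory
git submodule add https://github.com/Laradock/laradock.git

# If the project is not under Git yet, a plain clone works instead.
git clone https://github.com/Laradock/laradock.git
```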
Your folder structure should look like this:

@@ -82,6 +82,7 @@ Your folder structure should look like this:

*Or you can keep `default.conf` as it is, and create a separate config `my-site.conf` file for it.*

**In case of Apache:** :P

<br>

@@ -136,8 +137,8 @@ You can rename the config files, project folders and domains as you like, just m

If you are using **Docker Toolbox** (VM), do one of the following:

- Upgrade to Docker [Native](https://www.docker.com/products/docker) for Mac/Windows (Recommended). Check out [Upgrading Laradock](#upgrading-laradock)
- Use Laradock v3.* (Visit the `Laradock-ToolBox` [Branch](https://github.com/laradock/laradock/tree/Laradock-ToolBox)).
- Upgrade to Docker [Native](https://www.docker.com/products/docker) for Mac/Windows (Recommended). Check out [Upgrading Laradock](/documentation/#upgrading-laradock)
- Use Laradock v3.\*. Visit the [LaraDock-ToolBox](https://github.com/laradock/laradock/tree/LaraDock-ToolBox) branch. *(outdated)*

<br>

@@ -167,7 +168,7 @@ docker-compose up -d nginx mysql

You can select your own combination of containers form the list below:

> `nginx`, `hhvm`, `php-fpm`, `mysql`, `redis`, `postgres`, `mariadb`, `neo4j`, `mongo`, `apache2`, `caddy`, `memcached`, `beanstalkd`, `beanstalkd-console`, `rabbitmq`, `beanstalkd-console`, `workspace`, `phpmyadmin`, `aerospike`, `pgadmin`, `elasticsearch`, `rethinkdb`, `postgres-postgis`, `certbot`, `mailhog`, `minio` and more...!
> `nginx`, `hhvm`, `php-fpm`, `mysql`, `redis`, `postgres`, `mariadb`, `neo4j`, `mongo`, `apache2`, `caddy`, `memcached`, `beanstalkd`, `beanstalkd-console`, `rabbitmq`, `beanstalkd-console`, `workspace`, `phpmyadmin`, `adminer`, `aerospike`, `pgadmin`, `elasticsearch`, `rethinkdb`, `postgres-postgis`, `certbot`, `mailhog`, `minio` and more...!

*(Please note that sometimes we forget to update the docs, so check the `docker-compose.yml` file to see an updated list of all available containers).*
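As a concrete example of "your own combination", the service names below are all taken from the list just quoted; swap pieces freely:

```bash
# A typical Laravel stack: web server, PHP, database, cache, plus the workspace shell.
docker-compose up -d nginx php-fpm mysql redis workspace

# Or, for instance, Apache and PostgreSQL with Adminer instead:
docker-compose up -d apache2 php-fpm postgres adminer
```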
@@ -169,6 +169,7 @@ What's better than a **Demo Video**:

- RabbitMQ Console
- **Tools:**
  - PhpMyAdmin
  - Adminer
  - PgAdmin
  - ElasticSearch
  - Selenium

11 README.md

@@ -4,7 +4,7 @@

[](http://zalt.me)

Laradock is a Docker PHP development environment that facilitate running **PHP** Apps on **Docker**.
Laradock is a Docker PHP development environment that facilitates running **PHP** Apps on **Docker**.

## Documentation

@@ -12,15 +12,18 @@ Laradock is a Docker PHP development environment that facilitate running **PHP**

## Credits

**Super Admins:**
**Admins / Maintainers:**

- [Mahmoud Zalt](https://github.com/Mahmoudz) (mahmoudz) [ [Twitter](https://twitter.com/Mahmoud_Zalt) | [Personal Site](http://zalt.me) | [LinkedIn](https://www.linkedin.com/in/mahmoudzalt) ]
- [Bo-Yi Wu](https://github.com/appleboy) (appleboy) [ [Twitter](https://twitter.com/appleboy) ]
- [Philippe Trépanier](https://github.com/philtrep) (philtrep)
- [Mike Erickson](https://github.com/mikeerickson) (mikeerickson)
- Join Us!
- [Dwi Fahni Denni](https://github.com/zeroc0d3) (zeroc0d3)
- [Thor Erik](https://github.com/thorerik) (thorerik)
- [Winfried van Loon](https://github.com/winfried-van-loon) (winfried-van-loon)
- Contribute and join us!

**Amazing Contributors:**
**Contributors:**

- [Contributors](https://github.com/laradock/laradock/graphs/contributors)
@@ -1,112 +1,118 @@

#### Install Docker

```
Login Digital Ocean
Add Droplet
1 Click Install docker
Choose Droplet
reset ROOT password
check email
```

- Visit [DigitalOcean](https://cloud.digitalocean.com/login) and login.
- Click the `Create Droplet` button.
- Open the `One-click apps` tab.
- Select Docker with your preferred version.
- Continue creating the droplet as you normally would.
- If needed, check your e-mail for the droplet root password.

#### SSH to your Server

Find the IP address of the droplet in the DigitalOcean interface. Use it to connect to the server.

```
ssh root@ipaddress
```

you will be prompt of that password.
type the password you receive in your email

then it will ask to you to change a new password
just change it to the custom root password you want
You may be prompted for a password. Type the one you found within your e-mailbox. It'll then ask you to change the password.

After SSH
you can check that docker command is working by typing
You can now check if Docker is available:

```
$root@midascode:~# docker
$root@server:~# docker
```

#### Set Up Your Laravel Project

```
$root@midascode:~# apt-get install git
$root@midascode:~# git clone https://github.com/laravel/laravel
$root@midascode:~# cd laravel
$root@midascode:~/laravel/ git submodule add https://github.com/LaraDock/laradock.git
$root@midascode:~/laravel/ cd laradock
$root@server:~# apt-get install git
$root@server:~# git clone https://github.com/laravel/laravel
$root@server:~# cd laravel
$root@server:~/laravel/ git submodule add https://github.com/LaraDock/laradock.git
$root@server:~/laravel/ cd laradock
```

#### Install docker-compose command

```
$root@midascode:~/laravel/laradock# curl -L https://github.com/docker/compose/releases/download/1.8.0/run.sh > /usr/local/bin/docker-compose
$root@midascode:~/chmod +x /usr/local/bin/docker-compose
$root@server:~/laravel/laradock# curl -L https://github.com/docker/compose/releases/download/1.8.0/run.sh > /usr/local/bin/docker-compose
$root@server:~/chmod +x /usr/local/bin/docker-compose
```

#### Create Your LaraDock Containers

```
$root@midascode:~/laravel/laradock# docker-compose up -d nginx mysql
$root@server:~/laravel/laradock# docker-compose up -d nginx mysql
```

Note that more containers are available, find them in the [docs](http://laradock.io/introduction/#supported-software-containers) or the `docker-compose.yml` file.

#### Go to Your Workspace

```
docker-compose exec workspace bash
```

#### Install laravel Dependencies, Add .env , generate Key and give proper permission certain folder
#### Install and configure Laravel

Let's install Laravel's dependencies, add the `.env` file, generate the key and give proper permissions to the cache folder.

```
$ root@0e77851d27d3:/var/www# composer install
$ root@0e77851d27d3:/var/www# cp .env.example .env
$ root@0e77851d27d3:/var/www# php artisan key:generate
$ root@0e77851d27d3:/var/www# exit
$root@midascode:~/laravel/laradock# cd ..
$root@midascode:~/laravel# sudo chmod -R 777 storage bootstrap/cache
$ root@workspace:/var/www# composer install
$ root@workspace:/var/www# cp .env.example .env
$ root@workspace:/var/www# php artisan key:generate
$ root@workspace:/var/www# exit
$root@server:~/laravel/laradock# cd ..
$root@server:~/laravel# sudo chmod -R 777 storage bootstrap/cache
```

you can then view your laravel site at your ipaddress
for example
You can then view your Laravel site by visiting the IP address of your server in your browser. For example:

```
192.168.1.1
http://192.168.1.1
```

You will see there Laravel Default Welcome Page
It should show you the Laravel default welcome page.

but if you need to view on your custom domain name
which you would.
However, we want it to show up using your custom domain name, as well.

#### Using Your Own Domain Name

login to your DNS provider
Godaddy, Namecheap what ever...
And Point the Custom Domain Name Server to

Login to your DNS provider, such as Godaddy, Namecheap.

Point the Custom Domain Name Server to:

```
ns1.digitalocean.com
ns2.digitalocean.com
ns3.digitalocean.com
```

In Your Digital Ocean Account go to
```
https://cloud.digitalocean.com/networking/domains
```
add your domain name and choose the server ip you provision earlier

#### Serve Site With NGINX (HTTP ONLY)
Go back to command line
Within DigitalOcean, you'll need to change some settings, too.

Visit: https://cloud.digitalocean.com/networking/domains

Add your domain name and choose the server IP you'd provision earlier.

#### Serving Site With NGINX (HTTP ONLY)

Go back to command line.

```
$root@midascode:~/laravel/laradock# cd nginx
$root@midascode:~/laravel/laradock/nginx# vim laravel.conf
```
remove default_server
$root@server:~/laravel/laradock# cd nginx
$root@server:~/laravel/laradock/nginx# vim laravel.conf
```

Remove `default_server`

```
listen 80 default_server;
listen [::]:80 default_server ipv6only=on;

```
and add server_name (your custom domain)

And add `server_name` (your custom domain)

```
listen 80;
listen [::]:80 ipv6only=on;

@@ -114,27 +120,29 @@ remove default_server

```

#### Rebuild Your Nginx

```
$root@midascode:~/laravel/laradock/nginx# docker-compose down
$root@midascode:~/laravel/laradock/nginx# docker-compose build nginx
$root@server:~/laravel/laradock/nginx# docker-compose down
$root@server:~/laravel/laradock/nginx# docker-compose build nginx
```

#### Re Run Your Containers MYSQL and NGINX

```
$root@midascode:~/laravel/laradock/nginx# docker-compose up -d nginx mysql
$root@server:~/laravel/laradock/nginx# docker-compose up -d nginx mysql
```

###### View Your Site with HTTP ONLY (http://yourdomain.com)
**View Your Site with HTTP ONLY (http://yourdomain.com)**

#### Run Site on SSL with Let's Encrypt Certificate

###### Note: You need to Use Caddy here Instead of Nginx
**Note: You need to Use Caddy here Instead of Nginx**

###### To go Caddy Folders and Edit CaddyFile
To go Caddy Folders and Edit CaddyFile

```
$root@midascode:~/laravel/laradock# cd caddy
$root@midascode:~/laravel/laradock/caddy# vim Caddyfile
$root@server:~/laravel/laradock# cd caddy
$root@server:~/laravel/laradock/caddy# vim Caddyfile
```

Remove 0.0.0.0:80

@@ -143,31 +151,36 @@ Remove 0.0.0.0:80

0.0.0.0:80
root /var/www/public
```

and replace with your https://yourdomain.com

```
https://yourdomain.com
root /var/www/public
```

uncomment tls

```
#tls self-signed
```

and replace self-signed with your email address

```
tls midascodebreaker@gmai.com
tls serverbreaker@gmai.com
```

This is needed Prior to Creating Let's Encypt
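Put together, the Caddyfile edits above amount to the fragment sketched below (yourdomain.com and the e-mail address are placeholders, exactly as in the text; the rest of the Caddyfile stays untouched):

```bash
# After editing caddy/Caddyfile, the relevant lines should read:
#
#   https://yourdomain.com
#   root /var/www/public
#   tls serverbreaker@gmai.com
#
# Quick check that the edits are in place (run from the laradock/caddy folder):
grep -nE 'yourdomain.com|root /var/www/public|tls ' Caddyfile
```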
#### Run Your Caddy Container without the -d flag and Generate SSL with Let's Encrypt

```
$root@midascode:~/laravel/laradock/caddy# docker-compose up caddy
$root@server:~/laravel/laradock/caddy# docker-compose up caddy
```

you will be prompt here to enter your email... you may enter it or not
You'll be prompt here to enter your email... you may enter it or not

```
Attaching to laradock_mysql_1, laradock_caddy_1
caddy_1 | Activating privacy features...

@@ -179,17 +192,18 @@ caddy_1 | https://yourdomain.com

caddy_1 | http://yourdomain.com
```

After it finish Press Ctrl + C to exit ...
After it finishes, press `Ctrl` + `C` to exit.

#### Stop All Containers and ReRun Caddy and Other Containers on Background

```
$root@midascode:~/laravel/laradock/caddy# docker-compose down
$root@midascode:~/laravel/laradock/caddy# docker-compose up -d mysql caddy
$root@server:~/laravel/laradock/caddy# docker-compose down
$root@server:~/laravel/laradock/caddy# docker-compose up -d mysql caddy
```

View your Site in the Browser Securely Using HTTPS (https://yourdomain.com)

##### Note that Certificate will be Automatically Renew By Caddy
**Note that Certificate will be Automatically Renew By Caddy**

>References:
>

@@ -200,14 +214,3 @@ View your Site in the Browser Securely Using HTTPS (https://yourdomain.com)

- [https://caddyserver.com/docs/automatic-https](https://caddyserver.com/docs/automatic-https)
- [https://caddyserver.com/docs/tls](https://caddyserver.com/docs/tls)
- [https://caddyserver.com/docs/caddyfile](https://caddyserver.com/docs/caddyfile)
9 adminer/Dockerfile Normal file

@@ -0,0 +1,9 @@

FROM adminer:latest

MAINTAINER Patrick Artounian <partounian@gmail.com>

# Add volume for sessions to allow session persistence
VOLUME /sessions

# We expose Adminer on port 8080 (Adminer's default)
EXPOSE 8080
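To try the new image on its own, outside Compose, something like this should work (the `laradock_adminer` tag is arbitrary; 8080 is the port exposed above):

```bash
# Build the Adminer image from the new Dockerfile and run it standalone.
docker build -t laradock_adminer ./adminer
docker run --rm -p 8080:8080 laradock_adminer
# Then browse to http://localhost:8080
```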
@@ -1,5 +1,7 @@

FROM phusion/baseimage:latest

MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>

COPY run-certbot.sh /root/certbot/run-certbot.sh

RUN apt-get update

@@ -26,14 +26,16 @@ services:

        - COMPOSER_GLOBAL_INSTALL=${WORKSPACE_COMPOSER_GLOBAL_INSTALL}
        - INSTALL_WORKSPACE_SSH=${WORKSPACE_INSTALL_WORKSPACE_SSH}
        - INSTALL_LARAVEL_ENVOY=${WORKSPACE_INSTALL_LARAVEL_ENVOY}
        - INSTALL_DEPLOYER=${WORKSPACE_INSTALL_LARAVEL_ENVOY}
        - INSTALL_LINUXBREW=${WORKSPACE_INSTALL_LARAVEL_ENVOY}
        - INSTALL_DEPLOYER=${WORKSPACE_INSTALL_DEPLOYER}
        - INSTALL_LINUXBREW=${WORKSPACE_INSTALL_LINUXBREW}
        - INSTALL_MC=${WORKSPACE_INSTALL_MC}
        - INSTALL_SYMFONY=${WORKSPACE_INSTALL_SYMFONY}
        - PUID=${WORKSPACE_PUID}
        - PGID=${WORKSPACE_PGID}
        - NODE_VERSION=${WORKSPACE_NODE_VERSION}
        - YARN_VERSION=${WORKSPACE_YARN_VERSION}
        - TZ=${WORKSPACE_TIMEZONE}
      dockerfile: "Dockerfile-${PHP_VERSION}"
    volumes_from:
      - applications
    extra_hosts:

@@ -41,6 +43,9 @@ services:

    ports:
      - "${WORKSPACE_SSH_PORT}:22"
    tty: true
    networks:
      - frontend
      - backend
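The workspace hunks above correct two build args (`INSTALL_DEPLOYER` and `INSTALL_LINUXBREW` previously read the Envoy variable), switch the Dockerfile to `Dockerfile-${PHP_VERSION}`, and attach the service to the frontend/backend networks. Changed build args only take effect after a rebuild; a minimal sketch:

```bash
# Rebuild the workspace image so the corrected build args and the
# PHP_VERSION-based Dockerfile selection are actually used, then restart it.
docker-compose build workspace
docker-compose up -d workspace
```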
### PHP-FPM Container #######################################

@@ -61,7 +66,8 @@ services:

        - INSTALL_MYSQLI=${PHP_FPM_INSTALL_MYSQLI}
        - INSTALL_TOKENIZER=${PHP_FPM_INSTALL_TOKENIZER}
        - INSTALL_INTL=${PHP_FPM_INSTALL_INTL}
      dockerfile: ${PHP_FPM_DOCKER_FILE}
        - INSTALL_GHOSTSCRIPT=${PHP_FPM_INSTALL_GHOSTSCRIPT}
      dockerfile: "Dockerfile-${PHP_VERSION}"
    volumes_from:
      - applications
    expose:

@@ -72,8 +78,11 @@ services:

      - "dockerhost:${DOCKER_HOST_IP}"
    environment:
      - PHP_IDE_CONFIG=${PHP_IDE_CONFIG}
    networks:
      - backend

### PHP Worker Container #####################################

  php-worker:
    build:
      context: ./php-worker

@@ -81,6 +90,8 @@ services:

      - applications
    depends_on:
      - workspace
    networks:
      - backend

### Nginx Server Container ##################################

@@ -99,6 +110,9 @@ services:

      - "${NGINX_HOST_HTTPS_PORT}:443"
    depends_on:
      - php-fpm
    networks:
      - frontend
      - backend

### Apache Server Container #################################

@@ -112,13 +126,14 @@ services:

    volumes:
      - ${APACHE_HOST_LOG_PATH}:/var/log/apache2
      - ./apache2/sites:/etc/apache2/sites-available

    ports:
      - "${APACHE_HOST_HTTP_PORT}:80"
      - "${APACHE_HOST_HTTPS_PORT}:443"
    depends_on:
      - php-fpm
    networks:
      - frontend
      - backend

### HHVM Container ##########################################

@@ -130,6 +145,9 @@ services:

      - "9000"
    depends_on:
      - workspace
    networks:
      - frontend
      - backend

### Minio Container #########################################

@@ -140,15 +158,17 @@ services:

    ports:
      - "${MINIO_PORT}:9000"
    environment:
      MINIO_ACCESS_KEY: access
      MINIO_SECRET_KEY: secretkey
      - MINIO_ACCESS_KEY=access
      - MINIO_SECRET_KEY=secretkey
    networks:
      - frontend

### MySQL Container #########################################

  mysql:
    build:
      context: ./mysql
      args:
    environment:
      - MYSQL_DATABASE=${MYSQL_DATABASE}
      - MYSQL_USER=${MYSQL_USER}
      - MYSQL_PASSWORD=${MYSQL_PASSWORD}

@@ -157,6 +177,24 @@ services:

      - mysql:/var/lib/mysql
    ports:
      - "${MYSQL_PORT}:3306"
    networks:
      - backend

### MSSQL Container #########################################

  mssql:
    build:
      context: ./mssql
    environment:
      - MSSQL_DATABASE=${MSSQL_DATABASE}
      - SA_PASSWORD=${MSSQL_PASSWORD}
      - ACCEPT_EULA=Y
    volumes:
      - mssql:/var/opt/mssql
    ports:
      - "${MSSQL_PORT}:1433"
    networks:
      - backend
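The new `mssql` service can be tried out on its own; a sketch (the `${MSSQL_*}` values come from your `.env`):

```bash
# Start the new SQL Server container and check it is up and listening on the mapped port.
docker-compose up -d mssql
docker-compose ps mssql

# From the host, connect to localhost:${MSSQL_PORT} (1433 inside the container)
# as user "sa" with the password set via MSSQL_PASSWORD in .env.
```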
### MariaDB Container #######################################

@@ -167,10 +205,12 @@ services:

    ports:
      - "${MARIADB_PORT}:3306"
    environment:
      MYSQL_DATABASE: ${MARIADB_DATABASE}
      MYSQL_USER: ${MARIADB_USER}
      MYSQL_PASSWORD: ${MARIADB_PASSWORD}
      MYSQL_ROOT_PASSWORD: ${MARIADB_PORT}
      - MYSQL_DATABASE=${MARIADB_DATABASE}
      - MYSQL_USER=${MARIADB_USER}
      - MYSQL_PASSWORD=${MARIADB_PASSWORD}
      - MYSQL_ROOT_PASSWORD=${MARIADB_ROOT_PASSWORD}
    networks:
      - backend
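Besides the switch to list-style `environment:` entries, the root password now reads `MARIADB_ROOT_PASSWORD` instead of the old (wrong) `MARIADB_PORT`. A quick way to confirm what actually reaches the container, as a sketch:

```bash
# Render the fully-interpolated Compose file and check the resolved value;
# MYSQL_ROOT_PASSWORD should now come from MARIADB_ROOT_PASSWORD in .env.
docker-compose config | grep MYSQL_ROOT_PASSWORD
```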
### PostgreSQL Container ####################################

@@ -181,9 +221,11 @@ services:

    ports:
      - "${POSTGRES_PORT}:5432"
    environment:
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      - POSTGRES_DB=${POSTGRES_DB}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
    networks:
      - backend

### PostgreSQL PostGis Container ############################

@@ -194,9 +236,11 @@ services:

    ports:
      - "${POSTGRES_PORT}:5432"
    environment:
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      - POSTGRES_DB=${POSTGRES_DB}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
    networks:
      - backend

### Neo4j Container #########################################

@@ -209,6 +253,8 @@ services:

      - NEO4J_AUTH=default:secret
    volumes:
      - neo4j:/var/lib/neo4j/data
    networks:
      - backend

### MongoDB Container #######################################

@@ -218,6 +264,8 @@ services:

      - "27017:27017"
    volumes:
      - mongo:/data/db
    networks:
      - backend

### RethinkDB Container #######################################

@@ -227,6 +275,8 @@ services:

      - "8090:8080"
    volumes:
      - rethinkdb:/data/rethinkdb_data
    networks:
      - backend

### Redis Container #########################################

@@ -236,6 +286,8 @@ services:

      - redis:/data
    ports:
      - "6379:6379"
    networks:
      - backend

### Aerospike c Container ###################################

@@ -250,7 +302,8 @@ services:

      - "3001:3001"
      - "3002:3002"
      - "3003:3003"

    networks:
      - backend

### Memcached Container #####################################

@@ -262,6 +315,8 @@ services:

      - "${MEMCACHED_HOST_PORT}:11211"
    depends_on:
      - php-fpm
    networks:
      - backend

### Beanstalkd Container ####################################

@@ -272,6 +327,8 @@ services:

    privileged: true
    depends_on:
      - php-fpm
    networks:
      - backend

### RabbitMQ Container ######################################

@@ -283,10 +340,12 @@ services:

      - "${RABBITMQ_MANAGEMENT_HTTPS_HOST_PORT}:15671"
    privileged: true
    environment:
      RABBITMQ_DEFAULT_USER: ${RABBITMQ_DEFAULT_USER}
      RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS}
      - RABBITMQ_DEFAULT_USER=${RABBITMQ_DEFAULT_USER}
      - RABBITMQ_DEFAULT_PASS=${RABBITMQ_DEFAULT_PASS}
    depends_on:
      - php-fpm
    networks:
      - backend

### Beanstalkd Console Container ############################

@@ -296,6 +355,8 @@ services:

      - "2080:2080"
    depends_on:
      - beanstalkd
    networks:
      - backend

### Caddy Server Container ##################################

@@ -313,20 +374,37 @@ services:

      - caddy:/root/.caddy
    depends_on:
      - php-fpm
    networks:
      - frontend
      - backend

### phpMyAdmin Container ####################################

  phpmyadmin:
    build: ./phpmyadmin
    environment:
      PMA_ARBITRARY: 1
      MYSQL_USER: ${PMA_USER}
      MYSQL_PASSWORD: ${PMA_PASSWORD}
      MYSQL_ROOT_PASSWORD: ${PMA_ROOT_PASSWORD}
      - PMA_ARBITRARY=1
      - MYSQL_USER=${PMA_USER}
      - MYSQL_PASSWORD=${PMA_PASSWORD}
      - MYSQL_ROOT_PASSWORD=${PMA_ROOT_PASSWORD}
    ports:
      - "${PMA_PORT}:80"
    depends_on:
      - "${PMA_DB_ENGINE}"
    networks:
      - frontend
      - backend

### Adminer Container ####################################

  adminer:
    build: ./adminer
    ports:
      - "${ADM_PORT}:8080"
    depends_on:
      - php-fpm
    networks:
      - frontend

### pgAdmin Container #######################################

@@ -336,6 +414,8 @@ services:

      - "5050:5050"
    depends_on:
      - postgres
    networks:
      - frontend

### ElasticSearch Container #################################

@@ -349,6 +429,8 @@ services:

      - "${ELASTICSEARCH_HOST_TRANSPORT_PORT}:9300"
    depends_on:
      - php-fpm
    networks:
      - frontend

### Certbot Container ##################################

@@ -359,8 +441,10 @@ services:

      - ./data/certbot/certs/:/var/certs
      - ./certbot/letsencrypt/:/var/www/letsencrypt
    environment:
      CN: "fake.domain.com"
      EMAIL: "fake.email@gmail.com"
      - CN="fake.domain.com"
      - EMAIL="fake.email@gmail.com"
    networks:
      - frontend

### Mailhog Container #########################################

@@ -369,8 +453,10 @@ services:

    ports:
      - "1025:1025"
      - "8025:8025"
    networks:
      - frontend

### Selenium Container #########################################
### Selenium Container ########################################

  selenium:
    build: ./selenium

@@ -378,12 +464,87 @@ services:

      - "${SELENIUM_PORT}:4444"
    volumes:
      - /dev/shm:/dev/shm
    networks:
      - frontend

### Volumes Setup ###########################################
### Varnish Proxy 1 ##########################################

  proxy:
    build: ./varnish
    expose:
      - ${VARNISH_PORT}
    environment:
      - VARNISH_CONFIG=${VARNISH_CONFIG}
      - CACHE_SIZE=${VARNISH_PROXY1_CACHE_SIZE}
      - VARNISHD_PARAMS=${VARNISHD_PARAMS}
      - VARNISH_PORT=${VARNISH_PORT}
      - BACKEND_HOST=${VARNISH_PROXY1_BACKEND_HOST}
      - BACKEND_PORT=${VARNISH_BACKEND_PORT}
      - VARNISH_SERVER=${VARNISH_PROXY1_SERVER}
    links:
      - workspace
    networks:
      - frontend

### Varnish Proxy 2 ##########################################

  proxy2:
    build: ./varnish
    expose:
      - ${VARNISH_PORT}
    environment:
      - VARNISH_CONFIG=${VARNISH_CONFIG}
      - CACHE_SIZE=${VARNISH_PROXY2_CACHE_SIZE}
      - VARNISHD_PARAMS=${VARNISHD_PARAMS}
      - VARNISH_PORT=${VARNISH_PORT}
      - BACKEND_HOST=${VARNISH_PROXY2_BACKEND_HOST}
      - BACKEND_PORT=${VARNISH_BACKEND_PORT}
      - VARNISH_SERVER=${VARNISH_PROXY2_SERVER}
    links:
      - workspace
    networks:
      - frontend

### Balancer Haproxy ##########################################

  balancer:
    build: ./haproxy
    ports:
      - "${HAPROXY_HOST_HTTP_PORT}:8085"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    links:
      - proxy
      - proxy2
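The two new Varnish services are configured via the `${VARNISH_*}` variables in `.env`, and the HAProxy balancer is linked to both, presumably to spread traffic across them; a sketch of bringing the trio up:

```bash
# Start both Varnish proxies and the HAProxy balancer linked in front of them.
docker-compose up -d proxy proxy2 balancer

# The balancer is published on ${HAPROXY_HOST_HTTP_PORT} (mapped to 8085 in the container).
```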
### Jenkins ###################################################

  jenkins:
    build: ./jenkins
    environment:
      JAVA_OPTS: "-Djava.awt.headless=true"
    ports:
      - "${JENKINS_HOST_SLAVE_AGENT_PORT}:50000"
      - "${JENKINS_HOST_HTTP_PORT}:8080"
    privileged: true
    volumes:
      - ${JENKINS_HOME}:/var/jenkins_home
      - /var/run/docker.sock:/var/run/docker.sock
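To try the new Jenkins service (ports and the home directory come from `.env`; a sketch only):

```bash
# Start Jenkins; the web UI is published on ${JENKINS_HOST_HTTP_PORT}, the agent
# port on ${JENKINS_HOST_SLAVE_AGENT_PORT}, and its state is kept on the host
# under ${JENKINS_HOME}.
docker-compose up -d jenkins
```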
### Networks Setup ############################################

networks:
  frontend:
    driver: "bridge"
  backend:
    driver: "bridge"

### Volumes Setup #############################################

volumes:
  mysql:
    driver: "local"
  mssql:
    driver: "local"
  postgres:
    driver: "local"
  memcached:

@@ -402,6 +563,8 @@ volumes:

    driver: "local"
  phpmyadmin:
    driver: "local"
  adminer:
    driver: "local"
  aerospike:
    driver: "local"
  caddy:
@ -10,7 +10,7 @@
|
||||
<meta name="viewport" content="width=device-width,user-scalable=no,initial-scale=1,maximum-scale=1">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=10" />
|
||||
<title>Contributing - Laradock</title>
|
||||
<meta name="generator" content="Hugo 0.18.1" />
|
||||
<meta name="generator" content="Hugo 0.19" />
|
||||
|
||||
|
||||
<meta name="description" content="Laradock documentations.">
|
||||
@ -349,7 +349,7 @@ features, by not reporting duplicate issues.</em></p>
|
||||
<li>Install <a href="https://gohugo.io/">Hugo</a> on your machine (easy thing).</li>
|
||||
<li>Open the <code>DOCUMENTATION/_settings/content</code> and search for the markdown file you want to edit (every folder represents a section in the menu).</li>
|
||||
<li>Delete the <code>/docs</code> folder from the root.</li>
|
||||
<li>When you finish editing, run the <code>hugo</code> command to generate the HTML docs (in the <code>/docs</code>).</li>
|
||||
<li>When you finish editing, go to <code>DOCUMENTATION/_settings/</code> and run the <code>hugo</code> command to generate the HTML docs (inside new <code>/docs</code> folder).</li>
|
||||
</ol>
|
||||
|
||||
<h3 id="to-host-the-website-locally">To Host the website locally</h3>
|
||||
|
@ -52,7 +52,7 @@ features, by not reporting duplicate issues.</em></p>
|
||||
<li>Install <a href="https://gohugo.io/">Hugo</a> on your machine (easy thing).</li>
|
||||
<li>Open the <code>DOCUMENTATION/_settings/content</code> and search for the markdown file you want to edit (every folder represents a section in the menu).</li>
|
||||
<li>Delete the <code>/docs</code> folder from the root.</li>
|
||||
<li>When you finish editing, run the <code>hugo</code> command to generate the HTML docs (in the <code>/docs</code>).</li>
|
||||
<li>When you finish editing, go to <code>DOCUMENTATION/_settings/</code> and run the <code>hugo</code> command to generate the HTML docs (inside new <code>/docs</code> folder).</li>
|
||||
</ol>
|
||||
|
||||
<h3 id="to-host-the-website-locally">To Host the website locally</h3>
|
||||
|
@ -10,7 +10,7 @@
|
||||
<meta name="viewport" content="width=device-width,user-scalable=no,initial-scale=1,maximum-scale=1">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=10" />
|
||||
<title>Documentation - Laradock</title>
|
||||
<meta name="generator" content="Hugo 0.18.1" />
|
||||
<meta name="generator" content="Hugo 0.19" />
|
||||
|
||||
|
||||
<meta name="description" content="Laradock documentations.">
|
||||
@ -888,6 +888,18 @@ docker-compose up -d mariadb phpmyadmin
|
||||
|
||||
<p>2 - Open your browser and visit the localhost on port <strong>8080</strong>: <code>http://localhost:8080</code></p>
|
||||
|
||||
<p><br>
|
||||
<a name="Use-Adminer"></a></p>
|
||||
|
||||
<h2 id="use-adminer">Use Adminer</h2>
|
||||
|
||||
<p>1 - Run the Adminer Container (<code>adminer</code>) with the <code>docker-compose up</code> command. Example:</p>
|
||||
|
||||
<pre><code class="language-bash">docker-compose up -d adminer
|
||||
</code></pre>
|
||||
|
||||
<p>2 - Open your browser and visit the localhost on port <strong>8080</strong>: <code>http://localhost:8080</code></p>
|
||||
|
||||
<p><br>
|
||||
<a name="Use-pgAdmin"></a></p>
|
||||
|
||||
@ -1401,13 +1413,27 @@ e) set it to <code>true</code></p>
|
||||
|
||||
<h2 id="improve-speed-on-macos">Improve speed on MacOS</h2>
|
||||
|
||||
<p>Sharing code into Docker containers with osxfs have very poor performance compared to Linux. You can get around this issue by using NFS to share your files betwen your host and your container.</p>
|
||||
<p>Sharing code into Docker containers with osxfs have very poor performance compared to Linux. Likely there are some workarounds:</p>
|
||||
|
||||
<blockquote>
|
||||
<p>How to share files using NFS (d4m-nfs)</p>
|
||||
</blockquote>
|
||||
<h3 id="workaround-a-using-dinghy">Workaround A: using dinghy</h3>
|
||||
|
||||
<p><a href="https://github.com/IFSight/d4m-nfs">d4m-nfs</a> automatically mount NFS volume instead of osxfs one.</p>
|
||||
<p><a href="https://github.com/codekitchen/dinghy">Dinghy</a> creates its own VM using docker-machine, it will not modify your existing docker-machine VMs.</p>
|
||||
|
||||
<p>Quick Setup giude, (we recommend you check their docs)</p>
|
||||
|
||||
<p>1) <code>brew tap codekitchen/dinghy</code></p>
|
||||
|
||||
<p>2) <code>brew install dinghy</code></p>
|
||||
|
||||
<p>3) <code>dinghy create --provider virtualbox</code> (must have virtualbox installed, but they support other providers if you prefer)</p>
|
||||
|
||||
<p>4) after the above command is done it will display some env variables, copy them to the bash profile or zsh or.. (this will instruct docker to use the server running inside the VM)</p>
|
||||
|
||||
<p>5) <code>docker-compose up ...</code></p>
|
||||
|
||||
<h3 id="workaround-b-using-d4m-nfs">Workaround B: using d4m-nfs</h3>
|
||||
|
||||
<p><a href="https://github.com/IFSight/d4m-nfs">D4m-nfs</a> automatically mount NFS volume instead of osxfs one.</p>
|
||||
|
||||
<p>1) Update the Docker [File Sharing] preferences:</p>
|
||||
|
||||
@ -1423,23 +1449,30 @@ e) set it to <code>true</code></p>
|
||||
<p>4) Create (or edit) the file <code>~/d4m-nfs/etc/d4m-nfs-mounts.txt</code>, and write the follwing configuration in it:</p>
|
||||
|
||||
<pre><code class="language-txt">/Users:/Users
|
||||
/Volumes:/Volumes
|
||||
/private:/private
|
||||
</code></pre>
|
||||
|
||||
<p>5) Create (or edit) the file <code>/etc/exports</code>, make sure it exists and is empty. (There may be collisions if you come from Vagrant or if you already executed the <code>d4m-nfs.sh</code> script before).</p>
|
||||
|
||||
<p>6) Run the <code>d4m-nfs.sh</code> script:</p>
|
||||
<p>6) Run the <code>d4m-nfs.sh</code> script (might need Sudo):</p>
|
||||
|
||||
<pre><code class="language-bash">~/d4m-nfs/d4m-nfs.sh
|
||||
</code></pre>
|
||||
|
||||
<p>That’s it! Run your containers.. Example:</p>
|
||||
|
||||
<pre><code class="language-bash">docker-compose up -d nginx mysql
|
||||
<pre><code class="language-bash">docker-compose up ...
|
||||
</code></pre>
|
||||
|
||||
<p><strong>Note:</strong> If you faced any errors, try restarting Docker, and make sure you have no spaces in the <code>d4m-nfs-mounts.txt</code> file, and your <code>/etc/exports</code> file is clear.</p>
|
||||
<p><em>Note: If you faced any errors, try restarting Docker, and make sure you have no spaces in the <code>d4m-nfs-mounts.txt</code> file, and your <code>/etc/exports</code> file is clear.</em></p>
|
||||
|
||||
<h3 id="other-good-workarounds">Other good workarounds:</h3>
|
||||
|
||||
<ul>
|
||||
<li><a href="https://github.com/EugenMayer/docker-sync">docker-sync</a></li>
|
||||
<li>Add more here..</li>
|
||||
</ul>
|
||||
|
||||
<p>More details about this issue <a href="https://github.com/docker/for-mac/issues/77">here</a>.</p>
|
||||
|
||||
<p><br>
|
||||
<a name="Common-Problems"></a></p>
|
||||
|
@ -591,6 +591,18 @@ docker-compose up -d mariadb phpmyadmin
|
||||
|
||||
<p>2 - Open your browser and visit the localhost on port <strong>8080</strong>: <code>http://localhost:8080</code></p>
|
||||
|
||||
<p><br>
|
||||
<a name="Use-Adminer"></a></p>
|
||||
|
||||
<h2 id="use-adminer">Use Adminer</h2>
|
||||
|
||||
<p>1 - Run the Adminer Container (<code>adminer</code>) with the <code>docker-compose up</code> command. Example:</p>
|
||||
|
||||
<pre><code class="language-bash">docker-compose up -d adminer
|
||||
</code></pre>
|
||||
|
||||
<p>2 - Open your browser and visit the localhost on port <strong>8080</strong>: <code>http://localhost:8080</code></p>
|
||||
|
||||
<p><br>
|
||||
<a name="Use-pgAdmin"></a></p>
|
||||
|
||||
@ -1104,13 +1116,27 @@ e) set it to <code>true</code></p>
|
||||
|
||||
<h2 id="improve-speed-on-macos">Improve speed on MacOS</h2>
|
||||
|
||||
<p>Sharing code into Docker containers with osxfs have very poor performance compared to Linux. You can get around this issue by using NFS to share your files betwen your host and your container.</p>
|
||||
<p>Sharing code into Docker containers with osxfs have very poor performance compared to Linux. Likely there are some workarounds:</p>
|
||||
|
||||
<blockquote>
|
||||
<p>How to share files using NFS (d4m-nfs)</p>
|
||||
</blockquote>
|
||||
<h3 id="workaround-a-using-dinghy">Workaround A: using dinghy</h3>
|
||||
|
||||
<p><a href="https://github.com/IFSight/d4m-nfs">d4m-nfs</a> automatically mount NFS volume instead of osxfs one.</p>
|
||||
<p><a href="https://github.com/codekitchen/dinghy">Dinghy</a> creates its own VM using docker-machine, it will not modify your existing docker-machine VMs.</p>
|
||||
|
||||
<p>Quick Setup giude, (we recommend you check their docs)</p>
|
||||
|
||||
<p>1) <code>brew tap codekitchen/dinghy</code></p>
|
||||
|
||||
<p>2) <code>brew install dinghy</code></p>
|
||||
|
||||
<p>3) <code>dinghy create --provider virtualbox</code> (must have virtualbox installed, but they support other providers if you prefer)</p>
|
||||
|
||||
<p>4) after the above command is done it will display some env variables, copy them to the bash profile or zsh or.. (this will instruct docker to use the server running inside the VM)</p>
|
||||
|
||||
<p>5) <code>docker-compose up ...</code></p>
|
||||
|
||||
<h3 id="workaround-b-using-d4m-nfs">Workaround B: using d4m-nfs</h3>
|
||||
|
||||
<p><a href="https://github.com/IFSight/d4m-nfs">D4m-nfs</a> automatically mount NFS volume instead of osxfs one.</p>
|
||||
|
||||
<p>1) Update the Docker [File Sharing] preferences:</p>
|
||||
|
||||
@ -1126,23 +1152,30 @@ e) set it to <code>true</code></p>
|
||||
<p>4) Create (or edit) the file <code>~/d4m-nfs/etc/d4m-nfs-mounts.txt</code>, and write the follwing configuration in it:</p>
|
||||
|
||||
<pre><code class="language-txt">/Users:/Users
|
||||
/Volumes:/Volumes
|
||||
/private:/private
|
||||
</code></pre>
|
||||
|
||||
<p>5) Create (or edit) the file <code>/etc/exports</code>, make sure it exists and is empty. (There may be collisions if you come from Vagrant or if you already executed the <code>d4m-nfs.sh</code> script before).</p>
|
||||
|
||||
<p>6) Run the <code>d4m-nfs.sh</code> script:</p>
|
||||
<p>6) Run the <code>d4m-nfs.sh</code> script (might need Sudo):</p>
|
||||
|
||||
<pre><code class="language-bash">~/d4m-nfs/d4m-nfs.sh
|
||||
</code></pre>
|
||||
|
||||
<p>That&rsquo;s it! Run your containers.. Example:</p>
|
||||
|
||||
<pre><code class="language-bash">docker-compose up -d nginx mysql
|
||||
<pre><code class="language-bash">docker-compose up ...
|
||||
</code></pre>
|
||||
|
||||
<p><strong>Note:</strong> If you faced any errors, try restarting Docker, and make sure you have no spaces in the <code>d4m-nfs-mounts.txt</code> file, and your <code>/etc/exports</code> file is clear.</p>
|
||||
<p><em>Note: If you faced any errors, try restarting Docker, and make sure you have no spaces in the <code>d4m-nfs-mounts.txt</code> file, and your <code>/etc/exports</code> file is clear.</em></p>
|
||||
|
||||
<h3 id="other-good-workarounds">Other good workarounds:</h3>
|
||||
|
||||
<ul>
|
||||
<li><a href="https://github.com/EugenMayer/docker-sync">docker-sync</a></li>
|
||||
<li>Add more here..</li>
|
||||
</ul>
|
||||
|
||||
<p>More details about this issue <a href="https://github.com/docker/for-mac/issues/77">here</a>.</p>
|
||||
|
||||
<p><br>
|
||||
<a name="Common-Problems"></a></p>
|
||||
|
@ -10,7 +10,7 @@
|
||||
<meta name="viewport" content="width=device-width,user-scalable=no,initial-scale=1,maximum-scale=1">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=10" />
|
||||
<title>Getting Started - Laradock</title>
|
||||
<meta name="generator" content="Hugo 0.18.1" />
|
||||
<meta name="generator" content="Hugo 0.19" />
|
||||
|
||||
|
||||
<meta name="description" content="Laradock documentations.">
|
||||
@ -356,7 +356,7 @@
|
||||
<ul>
|
||||
<li><p>If you are not using Git yet for your project, you can use <code>git clone</code> instead of <code>git submodule</code>.</p></li>
|
||||
|
||||
<li><p>Note 2: To keep track of your Laradock changes, between your projects and also keep Laradock updated. <a href="#keep-tracking-Laradock">Check this</a></p></li>
|
||||
<li><p>Note 2: To keep track of your Laradock changes, between your projects and also keep Laradock updated. <a href="http://laradock.io/documentation/#keep-track-of-your-laradock-changes">Check this</a></p></li>
|
||||
</ul>
|
||||
|
||||
<p>Your folder structure should look like this:</p>
|
||||
@ -394,8 +394,9 @@
|
||||
|
||||
<p><em>Or you can keep <code>default.conf</code> as it is, and create a separate config <code>my-site.conf</code> file for it.</em></p>
|
||||
|
||||
<p><strong>In case of Apache:</strong> :P
|
||||
<br></p>
|
||||
<p><strong>In case of Apache:</strong> :P</p>
|
||||
|
||||
<p><br></p>
|
||||
|
||||
<blockquote>
|
||||
<p><strong>Now jump to the <a href="#Usage">Usage</a> section.</strong></p>
|
||||
@ -448,8 +449,8 @@
|
||||
<p>If you are using <strong>Docker Toolbox</strong> (VM), do one of the following:</p>
|
||||
|
||||
<ul>
|
||||
<li>Upgrade to Docker <a href="https://www.docker.com/products/docker">Native</a> for Mac/Windows (Recommended). Check out <a href="#upgrading-laradock">Upgrading Laradock</a></li>
|
||||
<li>Use Laradock v3.* (Visit the <code>Laradock-ToolBox</code> <a href="https://github.com/laradock/laradock/tree/Laradock-ToolBox">Branch</a>).</li>
|
||||
<li>Upgrade to Docker <a href="https://www.docker.com/products/docker">Native</a> for Mac/Windows (Recommended). Check out <a href="http://laradock.io/documentation/#upgrading-laradock">Upgrading Laradock</a></li>
|
||||
<li>Use Laradock v3.*. Visit the <a href="https://github.com/laradock/laradock/tree/LaraDock-ToolBox">LaraDock-ToolBox</a> branch. <em>(outdated)</em></li>
|
||||
</ul>
|
||||
|
||||
<p><br></p>
|
||||
@ -479,7 +480,7 @@
|
||||
<p>You can select your own combination of containers form the list below:</p>
|
||||
|
||||
<blockquote>
|
||||
<p><code>nginx</code>, <code>hhvm</code>, <code>php-fpm</code>, <code>mysql</code>, <code>redis</code>, <code>postgres</code>, <code>mariadb</code>, <code>neo4j</code>, <code>mongo</code>, <code>apache2</code>, <code>caddy</code>, <code>memcached</code>, <code>beanstalkd</code>, <code>beanstalkd-console</code>, <code>rabbitmq</code>, <code>beanstalkd-console</code>, <code>workspace</code>, <code>phpmyadmin</code>, <code>aerospike</code>, <code>pgadmin</code>, <code>elasticsearch</code>, <code>rethinkdb</code>, <code>postgres-postgis</code>, <code>certbot</code>, <code>mailhog</code>, <code>minio</code> and more…!</p>
|
||||
<p><code>nginx</code>, <code>hhvm</code>, <code>php-fpm</code>, <code>mysql</code>, <code>redis</code>, <code>postgres</code>, <code>mariadb</code>, <code>neo4j</code>, <code>mongo</code>, <code>apache2</code>, <code>caddy</code>, <code>memcached</code>, <code>beanstalkd</code>, <code>beanstalkd-console</code>, <code>rabbitmq</code>, <code>beanstalkd-console</code>, <code>workspace</code>, <code>phpmyadmin</code>, <code>adminer</code>, <code>aerospike</code>, <code>pgadmin</code>, <code>elasticsearch</code>, <code>rethinkdb</code>, <code>postgres-postgis</code>, <code>certbot</code>, <code>mailhog</code>, <code>minio</code> and more…!</p>
|
||||
</blockquote>
|
||||
|
||||
<p><em>(Please note that sometimes we forget to update the docs, so check the <code>docker-compose.yml</code> file to see an updated list of all available containers).</em></p>
|
||||
|
@ -59,7 +59,7 @@
|
||||
<ul>
|
||||
<li><p>If you are not using Git yet for your project, you can use <code>git clone</code> instead of <code>git submodule</code>.</p></li>
|
||||
|
||||
<li><p>Note 2: To keep track of your Laradock changes, between your projects and also keep Laradock updated. <a href="#keep-tracking-Laradock">Check this</a></p></li>
|
||||
<li><p>Note 2: To keep track of your Laradock changes, between your projects and also keep Laradock updated. <a href="http://laradock.io/documentation/#keep-track-of-your-laradock-changes">Check this</a></p></li>
|
||||
</ul>
|
||||
|
||||
<p>Your folder structure should look like this:</p>
|
||||
@ -97,8 +97,9 @@
|
||||
|
||||
<p><em>Or you can keep <code>default.conf</code> as it is, and create a separate config <code>my-site.conf</code> file for it.</em></p>
|
||||
|
||||
<p><strong>In case of Apache:</strong> :P
|
||||
<br></p>
|
||||
<p><strong>In case of Apache:</strong> :P</p>
|
||||
|
||||
<p><br></p>
|
||||
|
||||
<blockquote>
|
||||
<p><strong>Now jump to the <a href="#Usage">Usage</a> section.</strong></p>
|
||||
@ -151,8 +152,8 @@
|
||||
<p>If you are using <strong>Docker Toolbox</strong> (VM), do one of the following:</p>
|
||||
|
||||
<ul>
|
||||
<li>Upgrade to Docker <a href="https://www.docker.com/products/docker">Native</a> for Mac/Windows (Recommended). Check out <a href="#upgrading-laradock">Upgrading Laradock</a></li>
|
||||
<li>Use Laradock v3.* (Visit the <code>Laradock-ToolBox</code> <a href="https://github.com/laradock/laradock/tree/Laradock-ToolBox">Branch</a>).</li>
|
||||
<li>Upgrade to Docker <a href="https://www.docker.com/products/docker">Native</a> for Mac/Windows (Recommended). Check out <a href="http://laradock.io/documentation/#upgrading-laradock">Upgrading Laradock</a></li>
|
||||
<li>Use Laradock v3.*. Visit the <a href="https://github.com/laradock/laradock/tree/LaraDock-ToolBox">LaraDock-ToolBox</a> branch. <em>(outdated)</em></li>
|
||||
</ul>
|
||||
|
||||
<p><br></p>
|
||||
@ -182,7 +183,7 @@
|
||||
<p>You can select your own combination of containers form the list below:</p>
|
||||
|
||||
<blockquote>
|
||||
<p><code>nginx</code>, <code>hhvm</code>, <code>php-fpm</code>, <code>mysql</code>, <code>redis</code>, <code>postgres</code>, <code>mariadb</code>, <code>neo4j</code>, <code>mongo</code>, <code>apache2</code>, <code>caddy</code>, <code>memcached</code>, <code>beanstalkd</code>, <code>beanstalkd-console</code>, <code>rabbitmq</code>, <code>beanstalkd-console</code>, <code>workspace</code>, <code>phpmyadmin</code>, <code>aerospike</code>, <code>pgadmin</code>, <code>elasticsearch</code>, <code>rethinkdb</code>, <code>postgres-postgis</code>, <code>certbot</code>, <code>mailhog</code>, <code>minio</code> and more&hellip;!</p>
|
||||
<p><code>nginx</code>, <code>hhvm</code>, <code>php-fpm</code>, <code>mysql</code>, <code>redis</code>, <code>postgres</code>, <code>mariadb</code>, <code>neo4j</code>, <code>mongo</code>, <code>apache2</code>, <code>caddy</code>, <code>memcached</code>, <code>beanstalkd</code>, <code>beanstalkd-console</code>, <code>rabbitmq</code>, <code>beanstalkd-console</code>, <code>workspace</code>, <code>phpmyadmin</code>, <code>adminer</code>, <code>aerospike</code>, <code>pgadmin</code>, <code>elasticsearch</code>, <code>rethinkdb</code>, <code>postgres-postgis</code>, <code>certbot</code>, <code>mailhog</code>, <code>minio</code> and more&hellip;!</p>
|
||||
</blockquote>
|
||||
|
||||
<p><em>(Please note that sometimes we forget to update the docs, so check the <code>docker-compose.yml</code> file to see an updated list of all available containers).</em></p>
|
||||
|
@ -10,7 +10,7 @@
|
||||
<meta name="viewport" content="width=device-width,user-scalable=no,initial-scale=1,maximum-scale=1">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=10" />
|
||||
<title>Help & Questions - Laradock</title>
|
||||
<meta name="generator" content="Hugo 0.18.1" />
|
||||
<meta name="generator" content="Hugo 0.19" />
|
||||
|
||||
|
||||
<meta name="description" content="Laradock documentations.">
|
||||
|
@ -10,7 +10,7 @@
|
||||
<meta name="viewport" content="width=device-width,user-scalable=no,initial-scale=1,maximum-scale=1">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=10" />
|
||||
<title>Laradock</title>
|
||||
<meta name="generator" content="Hugo 0.18.1" />
|
||||
<meta name="generator" content="Hugo 0.19" />
|
||||
|
||||
|
||||
<meta name="description" content="Laradock documentations.">
|
||||
@ -468,6 +468,7 @@ QUEUE_HOST=beanstalkd
|
||||
|
||||
<ul>
|
||||
<li>PhpMyAdmin</li>
|
||||
<li>Adminer</li>
|
||||
<li>PgAdmin</li>
|
||||
<li>ElasticSearch</li>
|
||||
<li>Selenium</li>
|
||||
@ -554,7 +555,7 @@ QUEUE_HOST=beanstalkd
|
||||
<ul>
|
||||
<li><p>If you are not using Git yet for your project, you can use <code>git clone</code> instead of <code>git submodule</code>.</p></li>
|
||||
|
||||
<li><p>Note 2: To keep track of your Laradock changes, between your projects and also keep Laradock updated. <a href="#keep-tracking-Laradock">Check this</a></p></li>
|
||||
<li><p>Note 2: To keep track of your Laradock changes, between your projects and also keep Laradock updated. <a href="http://laradock.io/documentation/#keep-track-of-your-laradock-changes">Check this</a></p></li>
|
||||
</ul>
|
||||
|
||||
<p>Your folder structure should look like this:</p>
|
||||
@ -592,8 +593,9 @@ QUEUE_HOST=beanstalkd
|
||||
|
||||
<p><em>Or you can keep <code>default.conf</code> as it is, and create a separate config <code>my-site.conf</code> file for it.</em></p>
|
||||
|
||||
<p><strong>In case of Apache:</strong> :P
|
||||
<br></p>
|
||||
<p><strong>In case of Apache:</strong> :P</p>
|
||||
|
||||
<p><br></p>
|
||||
|
||||
<blockquote>
|
||||
<p><strong>Now jump to the <a href="#Usage">Usage</a> section.</strong></p>
|
||||
@ -646,8 +648,8 @@ QUEUE_HOST=beanstalkd
|
||||
<p>If you are using <strong>Docker Toolbox</strong> (VM), do one of the following:</p>
|
||||
|
||||
<ul>
|
||||
<li>Upgrade to Docker <a href="https://www.docker.com/products/docker">Native</a> for Mac/Windows (Recommended). Check out <a href="#upgrading-laradock">Upgrading Laradock</a></li>
|
||||
<li>Use Laradock v3.* (Visit the <code>Laradock-ToolBox</code> <a href="https://github.com/laradock/laradock/tree/Laradock-ToolBox">Branch</a>).</li>
|
||||
<li>Upgrade to Docker <a href="https://www.docker.com/products/docker">Native</a> for Mac/Windows (Recommended). Check out <a href="http://laradock.io/documentation/#upgrading-laradock">Upgrading Laradock</a></li>
|
||||
<li>Use Laradock v3.*. Visit the <a href="https://github.com/laradock/laradock/tree/LaraDock-ToolBox">LaraDock-ToolBox</a> branch. <em>(outdated)</em></li>
|
||||
</ul>
|
||||
|
||||
<p><br></p>
|
||||
@ -677,7 +679,7 @@ QUEUE_HOST=beanstalkd
|
||||
<p>You can select your own combination of containers from the list below:</p>
|
||||
|
||||
<blockquote>
|
||||
<p><code>nginx</code>, <code>hhvm</code>, <code>php-fpm</code>, <code>mysql</code>, <code>redis</code>, <code>postgres</code>, <code>mariadb</code>, <code>neo4j</code>, <code>mongo</code>, <code>apache2</code>, <code>caddy</code>, <code>memcached</code>, <code>beanstalkd</code>, <code>beanstalkd-console</code>, <code>rabbitmq</code>, <code>beanstalkd-console</code>, <code>workspace</code>, <code>phpmyadmin</code>, <code>aerospike</code>, <code>pgadmin</code>, <code>elasticsearch</code>, <code>rethinkdb</code>, <code>postgres-postgis</code>, <code>certbot</code>, <code>mailhog</code>, <code>minio</code> and more…!</p>
|
||||
<p><code>nginx</code>, <code>hhvm</code>, <code>php-fpm</code>, <code>mysql</code>, <code>redis</code>, <code>postgres</code>, <code>mariadb</code>, <code>neo4j</code>, <code>mongo</code>, <code>apache2</code>, <code>caddy</code>, <code>memcached</code>, <code>beanstalkd</code>, <code>beanstalkd-console</code>, <code>rabbitmq</code>, <code>workspace</code>, <code>phpmyadmin</code>, <code>adminer</code>, <code>aerospike</code>, <code>pgadmin</code>, <code>elasticsearch</code>, <code>rethinkdb</code>, <code>postgres-postgis</code>, <code>certbot</code>, <code>mailhog</code>, <code>minio</code> and more…!</p>
|
||||
</blockquote>
|
||||
|
||||
<p><em>(Please note that sometimes we forget to update the docs, so check the <code>docker-compose.yml</code> file to see an updated list of all available containers).</em></p>
|
||||
@ -1293,6 +1295,18 @@ docker-compose up -d mariadb phpmyadmin
|
||||
|
||||
<p>2 - Open your browser and visit the localhost on port <strong>8080</strong>: <code>http://localhost:8080</code></p>
|
||||
|
||||
<p><br>
|
||||
<a name="Use-Adminer"></a></p>
|
||||
|
||||
<h2 id="use-adminer">Use Adminer</h2>
|
||||
|
||||
<p>1 - Run the Adminer Container (<code>adminer</code>) with the <code>docker-compose up</code> command. Example:</p>
|
||||
|
||||
<pre><code class="language-bash">docker-compose up -d adminer
|
||||
</code></pre>
|
||||
|
||||
<p>2 - Open your browser and visit the localhost on port <strong>8080</strong>: <code>http://localhost:8080</code></p>
|
||||
|
||||
<p><br>
|
||||
<a name="Use-pgAdmin"></a></p>
|
||||
|
||||
@ -1806,13 +1820,27 @@ e) set it to <code>true</code></p>
|
||||
|
||||
<h2 id="improve-speed-on-macos">Improve speed on MacOS</h2>
|
||||
|
||||
<p>Sharing code into Docker containers with osxfs have very poor performance compared to Linux. You can get around this issue by using NFS to share your files betwen your host and your container.</p>
|
||||
<p>Sharing code into Docker containers with osxfs has very poor performance compared to Linux. Luckily, there are some workarounds:</p>
|
||||
|
||||
<blockquote>
|
||||
<p>How to share files using NFS (d4m-nfs)</p>
|
||||
</blockquote>
|
||||
<h3 id="workaround-a-using-dinghy">Workaround A: using dinghy</h3>
|
||||
|
||||
<p><a href="https://github.com/IFSight/d4m-nfs">d4m-nfs</a> automatically mount NFS volume instead of osxfs one.</p>
|
||||
<p><a href="https://github.com/codekitchen/dinghy">Dinghy</a> creates its own VM using docker-machine, it will not modify your existing docker-machine VMs.</p>
|
||||
|
||||
<p>Quick setup guide (we recommend you check their docs):</p>
|
||||
|
||||
<p>1) <code>brew tap codekitchen/dinghy</code></p>
|
||||
|
||||
<p>2) <code>brew install dinghy</code></p>
|
||||
|
||||
<p>3) <code>dinghy create --provider virtualbox</code> (you must have VirtualBox installed, but other providers are supported if you prefer)</p>
|
||||
|
||||
<p>4) After the above command finishes, it will display some environment variables; copy them to your bash or zsh profile (this instructs Docker to use the server running inside the VM).</p>
|
||||
|
||||
<p>5) <code>docker-compose up ...</code></p>
|
||||
|
||||
<h3 id="workaround-b-using-d4m-nfs">Workaround B: using d4m-nfs</h3>
|
||||
|
||||
<p><a href="https://github.com/IFSight/d4m-nfs">D4m-nfs</a> automatically mount NFS volume instead of osxfs one.</p>
|
||||
|
||||
<p>1) Update the Docker [File Sharing] preferences:</p>
|
||||
|
||||
@ -1828,23 +1856,30 @@ e) set it to <code>true</code></p>
|
||||
<p>4) Create (or edit) the file <code>~/d4m-nfs/etc/d4m-nfs-mounts.txt</code>, and write the following configuration in it:</p>
|
||||
|
||||
<pre><code class="language-txt">/Users:/Users
|
||||
/Volumes:/Volumes
|
||||
/private:/private
|
||||
</code></pre>
|
||||
|
||||
<p>5) Create (or edit) the file <code>/etc/exports</code>, make sure it exists and is empty. (There may be collisions if you come from Vagrant or if you already executed the <code>d4m-nfs.sh</code> script before).</p>
|
||||
|
||||
<p>6) Run the <code>d4m-nfs.sh</code> script:</p>
|
||||
<p>6) Run the <code>d4m-nfs.sh</code> script (might need <code>sudo</code>):</p>
|
||||
|
||||
<pre><code class="language-bash">~/d4m-nfs/d4m-nfs.sh
|
||||
</code></pre>
|
||||
|
||||
<p>That’s it! Run your containers.. Example:</p>
|
||||
|
||||
<pre><code class="language-bash">docker-compose up -d nginx mysql
|
||||
<pre><code class="language-bash">docker-compose up ...
|
||||
</code></pre>
|
||||
|
||||
<p><strong>Note:</strong> If you faced any errors, try restarting Docker, and make sure you have no spaces in the <code>d4m-nfs-mounts.txt</code> file, and your <code>/etc/exports</code> file is clear.</p>
|
||||
<p><em>Note: If you face any errors, try restarting Docker, make sure there are no spaces in the <code>d4m-nfs-mounts.txt</code> file, and confirm that your <code>/etc/exports</code> file is empty.</em></p>
|
||||
|
||||
<h3 id="other-good-workarounds">Other good workarounds:</h3>
|
||||
|
||||
<ul>
|
||||
<li><a href="https://github.com/EugenMayer/docker-sync">docker-sync</a></li>
|
||||
<li>Add more here..</li>
|
||||
</ul>
|
||||
|
||||
<p>More details about this issue <a href="https://github.com/docker/for-mac/issues/77">here</a>.</p>
|
||||
|
||||
<p><br>
|
||||
<a name="Common-Problems"></a></p>
|
||||
@ -1980,7 +2015,7 @@ features, by not reporting duplicate issues.</em></p>
|
||||
<li>Install <a href="https://gohugo.io/">Hugo</a> on your machine (easy thing).</li>
|
||||
<li>Open the <code>DOCUMENTATION/_settings/content</code> and search for the markdown file you want to edit (every folder represents a section in the menu).</li>
|
||||
<li>Delete the <code>/docs</code> folder from the root.</li>
|
||||
<li>When you finish editing, run the <code>hugo</code> command to generate the HTML docs (in the <code>/docs</code>).</li>
|
||||
<li>When you finish editing, go to <code>DOCUMENTATION/_settings/</code> and run the <code>hugo</code> command to generate the HTML docs (inside new <code>/docs</code> folder).</li>
|
||||
</ol>
|
||||
|
||||
<h3 id="to-host-the-website-locally">To Host the website locally</h3>
|
||||
|
@ -170,6 +170,7 @@ QUEUE_HOST=beanstalkd
|
||||
|
||||
<ul>
|
||||
<li>PhpMyAdmin</li>
|
||||
<li>Adminer</li>
|
||||
<li>PgAdmin</li>
|
||||
<li>ElasticSearch</li>
|
||||
<li>Selenium</li>
|
||||
@ -261,7 +262,7 @@ QUEUE_HOST=beanstalkd
|
||||
<ul>
|
||||
<li><p>If you are not using Git yet for your project, you can use <code>git clone</code> instead of <code>git submodule</code>.</p></li>
|
||||
|
||||
<li><p>Note 2: To keep track of your Laradock changes, between your projects and also keep Laradock updated. <a href="#keep-tracking-Laradock">Check this</a></p></li>
|
||||
<li><p>Note 2: To keep track of your Laradock changes across your projects, and to keep Laradock updated, <a href="http://laradock.io/documentation/#keep-track-of-your-laradock-changes">check this</a>.</p></li>
|
||||
</ul>
|
||||
|
||||
<p>Your folder structure should look like this:</p>
|
||||
@ -299,8 +300,9 @@ QUEUE_HOST=beanstalkd
|
||||
|
||||
<p><em>Or you can keep <code>default.conf</code> as it is, and create a separate config <code>my-site.conf</code> file for it.</em></p>
|
||||
|
||||
<p><strong>In case of Apache:</strong> :P
|
||||
<br></p>
|
||||
<p><strong>In case of Apache:</strong> :P</p>
|
||||
|
||||
<p><br></p>
|
||||
|
||||
<blockquote>
|
||||
<p><strong>Now jump to the <a href="#Usage">Usage</a> section.</strong></p>
|
||||
@ -353,8 +355,8 @@ QUEUE_HOST=beanstalkd
|
||||
<p>If you are using <strong>Docker Toolbox</strong> (VM), do one of the following:</p>
|
||||
|
||||
<ul>
|
||||
<li>Upgrade to Docker <a href="https://www.docker.com/products/docker">Native</a> for Mac/Windows (Recommended). Check out <a href="#upgrading-laradock">Upgrading Laradock</a></li>
|
||||
<li>Use Laradock v3.* (Visit the <code>Laradock-ToolBox</code> <a href="https://github.com/laradock/laradock/tree/Laradock-ToolBox">Branch</a>).</li>
|
||||
<li>Upgrade to Docker <a href="https://www.docker.com/products/docker">Native</a> for Mac/Windows (Recommended). Check out <a href="http://laradock.io/documentation/#upgrading-laradock">Upgrading Laradock</a></li>
|
||||
<li>Use Laradock v3.*. Visit the <a href="https://github.com/laradock/laradock/tree/LaraDock-ToolBox">LaraDock-ToolBox</a> branch. <em>(outdated)</em></li>
|
||||
</ul>
|
||||
|
||||
<p><br></p>
|
||||
@ -384,7 +386,7 @@ QUEUE_HOST=beanstalkd
|
||||
<p>You can select your own combination of containers from the list below:</p>
|
||||
|
||||
<blockquote>
|
||||
<p><code>nginx</code>, <code>hhvm</code>, <code>php-fpm</code>, <code>mysql</code>, <code>redis</code>, <code>postgres</code>, <code>mariadb</code>, <code>neo4j</code>, <code>mongo</code>, <code>apache2</code>, <code>caddy</code>, <code>memcached</code>, <code>beanstalkd</code>, <code>beanstalkd-console</code>, <code>rabbitmq</code>, <code>beanstalkd-console</code>, <code>workspace</code>, <code>phpmyadmin</code>, <code>aerospike</code>, <code>pgadmin</code>, <code>elasticsearch</code>, <code>rethinkdb</code>, <code>postgres-postgis</code>, <code>certbot</code>, <code>mailhog</code>, <code>minio</code> and more&hellip;!</p>
|
||||
<p><code>nginx</code>, <code>hhvm</code>, <code>php-fpm</code>, <code>mysql</code>, <code>redis</code>, <code>postgres</code>, <code>mariadb</code>, <code>neo4j</code>, <code>mongo</code>, <code>apache2</code>, <code>caddy</code>, <code>memcached</code>, <code>beanstalkd</code>, <code>beanstalkd-console</code>, <code>rabbitmq</code>, <code>workspace</code>, <code>phpmyadmin</code>, <code>adminer</code>, <code>aerospike</code>, <code>pgadmin</code>, <code>elasticsearch</code>, <code>rethinkdb</code>, <code>postgres-postgis</code>, <code>certbot</code>, <code>mailhog</code>, <code>minio</code> and more&hellip;!</p>
|
||||
</blockquote>
|
||||
|
||||
<p><em>(Please note that sometimes we forget to update the docs, so check the <code>docker-compose.yml</code> file to see an updated list of all available containers).</em></p>
|
||||
@ -1005,6 +1007,18 @@ docker-compose up -d mariadb phpmyadmin
|
||||
|
||||
<p>2 - Open your browser and visit the localhost on port <strong>8080</strong>: <code>http://localhost:8080</code></p>
|
||||
|
||||
<p><br>
|
||||
<a name="Use-Adminer"></a></p>
|
||||
|
||||
<h2 id="use-adminer">Use Adminer</h2>
|
||||
|
||||
<p>1 - Run the Adminer Container (<code>adminer</code>) with the <code>docker-compose up</code> command. Example:</p>
|
||||
|
||||
<pre><code class="language-bash">docker-compose up -d adminer
|
||||
</code></pre>
|
||||
|
||||
<p>2 - Open your browser and visit the localhost on port <strong>8080</strong>: <code>http://localhost:8080</code></p>
|
||||
|
||||
<p><br>
|
||||
<a name="Use-pgAdmin"></a></p>
|
||||
|
||||
@ -1518,13 +1532,27 @@ e) set it to <code>true</code></p>
|
||||
|
||||
<h2 id="improve-speed-on-macos">Improve speed on MacOS</h2>
|
||||
|
||||
<p>Sharing code into Docker containers with osxfs have very poor performance compared to Linux. You can get around this issue by using NFS to share your files betwen your host and your container.</p>
|
||||
<p>Sharing code into Docker containers with osxfs has very poor performance compared to Linux. Luckily, there are some workarounds:</p>
|
||||
|
||||
<blockquote>
|
||||
<p>How to share files using NFS (d4m-nfs)</p>
|
||||
</blockquote>
|
||||
<h3 id="workaround-a-using-dinghy">Workaround A: using dinghy</h3>
|
||||
|
||||
<p><a href="https://github.com/IFSight/d4m-nfs">d4m-nfs</a> automatically mount NFS volume instead of osxfs one.</p>
|
||||
<p><a href="https://github.com/codekitchen/dinghy">Dinghy</a> creates its own VM using docker-machine, it will not modify your existing docker-machine VMs.</p>
|
||||
|
||||
<p>Quick setup guide (we recommend you check their docs):</p>
|
||||
|
||||
<p>1) <code>brew tap codekitchen/dinghy</code></p>
|
||||
|
||||
<p>2) <code>brew install dinghy</code></p>
|
||||
|
||||
<p>3) <code>dinghy create --provider virtualbox</code> (you must have VirtualBox installed, but other providers are supported if you prefer)</p>
|
||||
|
||||
<p>4) After the above command finishes, it will display some environment variables; copy them to your bash or zsh profile (this instructs Docker to use the server running inside the VM).</p>
|
||||
|
||||
<p>5) <code>docker-compose up ...</code></p>
|
||||
|
||||
<h3 id="workaround-b-using-d4m-nfs">Workaround B: using d4m-nfs</h3>
|
||||
|
||||
<p><a href="https://github.com/IFSight/d4m-nfs">D4m-nfs</a> automatically mount NFS volume instead of osxfs one.</p>
|
||||
|
||||
<p>1) Update the Docker [File Sharing] preferences:</p>
|
||||
|
||||
@ -1540,23 +1568,30 @@ e) set it to <code>true</code></p>
|
||||
<p>4) Create (or edit) the file <code>~/d4m-nfs/etc/d4m-nfs-mounts.txt</code>, and write the following configuration in it:</p>
|
||||
|
||||
<pre><code class="language-txt">/Users:/Users
|
||||
/Volumes:/Volumes
|
||||
/private:/private
|
||||
</code></pre>
|
||||
|
||||
<p>5) Create (or edit) the file <code>/etc/exports</code>, make sure it exists and is empty. (There may be collisions if you come from Vagrant or if you already executed the <code>d4m-nfs.sh</code> script before).</p>
|
||||
|
||||
<p>6) Run the <code>d4m-nfs.sh</code> script:</p>
|
||||
<p>6) Run the <code>d4m-nfs.sh</code> script (might need <code>sudo</code>):</p>
|
||||
|
||||
<pre><code class="language-bash">~/d4m-nfs/d4m-nfs.sh
|
||||
</code></pre>
|
||||
|
||||
<p>That&rsquo;s it! Run your containers.. Example:</p>
|
||||
|
||||
<pre><code class="language-bash">docker-compose up -d nginx mysql
|
||||
<pre><code class="language-bash">docker-compose up ...
|
||||
</code></pre>
|
||||
|
||||
<p><strong>Note:</strong> If you faced any errors, try restarting Docker, and make sure you have no spaces in the <code>d4m-nfs-mounts.txt</code> file, and your <code>/etc/exports</code> file is clear.</p>
|
||||
<p><em>Note: If you face any errors, try restarting Docker, make sure there are no spaces in the <code>d4m-nfs-mounts.txt</code> file, and confirm that your <code>/etc/exports</code> file is empty.</em></p>
|
||||
|
||||
<h3 id="other-good-workarounds">Other good workarounds:</h3>
|
||||
|
||||
<ul>
|
||||
<li><a href="https://github.com/EugenMayer/docker-sync">docker-sync</a></li>
|
||||
<li>Add more here..</li>
|
||||
</ul>
|
||||
|
||||
<p>More details about this issue <a href="https://github.com/docker/for-mac/issues/77">here</a>.</p>
|
||||
|
||||
<p><br>
|
||||
<a name="Common-Problems"></a></p>
|
||||
@ -1707,7 +1742,7 @@ features, by not reporting duplicate issues.</em></p>
|
||||
<li>Install <a href="https://gohugo.io/">Hugo</a> on your machine (easy thing).</li>
|
||||
<li>Open the <code>DOCUMENTATION/_settings/content</code> and search for the markdown file you want to edit (every folder represents a section in the menu).</li>
|
||||
<li>Delete the <code>/docs</code> folder from the root.</li>
|
||||
<li>When you finish editing, run the <code>hugo</code> command to generate the HTML docs (in the <code>/docs</code>).</li>
|
||||
<li>When you finish editing, go to <code>DOCUMENTATION/_settings/</code> and run the <code>hugo</code> command to generate the HTML docs (inside new <code>/docs</code> folder).</li>
|
||||
</ol>
|
||||
|
||||
<h3 id="to-host-the-website-locally">To Host the website locally</h3>
|
||||
|
@ -10,7 +10,7 @@
|
||||
<meta name="viewport" content="width=device-width,user-scalable=no,initial-scale=1,maximum-scale=1">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=10" />
|
||||
<title>Introduction - Laradock</title>
|
||||
<meta name="generator" content="Hugo 0.18.1" />
|
||||
<meta name="generator" content="Hugo 0.19" />
|
||||
|
||||
|
||||
<meta name="description" content="Laradock documentations.">
|
||||
@ -467,6 +467,7 @@ QUEUE_HOST=beanstalkd
|
||||
|
||||
<ul>
|
||||
<li>PhpMyAdmin</li>
|
||||
<li>Adminer</li>
|
||||
<li>PgAdmin</li>
|
||||
<li>ElasticSearch</li>
|
||||
<li>Selenium</li>
|
||||
|
@ -170,6 +170,7 @@ QUEUE_HOST=beanstalkd
|
||||
|
||||
<ul>
|
||||
<li>PhpMyAdmin</li>
|
||||
<li>Adminer</li>
|
||||
<li>PgAdmin</li>
|
||||
<li>ElasticSearch</li>
|
||||
<li>Selenium</li>
|
||||
|
@ -10,7 +10,7 @@
|
||||
<meta name="viewport" content="width=device-width,user-scalable=no,initial-scale=1,maximum-scale=1">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=10" />
|
||||
<title>License - Laradock</title>
|
||||
<meta name="generator" content="Hugo 0.18.1" />
|
||||
<meta name="generator" content="Hugo 0.19" />
|
||||
|
||||
|
||||
<meta name="description" content="Laradock documentations.">
|
||||
|
@ -10,7 +10,7 @@
|
||||
<meta name="viewport" content="width=device-width,user-scalable=no,initial-scale=1,maximum-scale=1">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=10" />
|
||||
<title>Related Projects - Laradock</title>
|
||||
<meta name="generator" content="Hugo 0.18.1" />
|
||||
<meta name="generator" content="Hugo 0.19" />
|
||||
|
||||
|
||||
<meta name="description" content="Laradock documentations.">
|
||||
|
39
env-example
@ -3,9 +3,9 @@
|
||||
APPLICATION=../
|
||||
|
||||
### PHP version (Does not apply for HHVM)
|
||||
# PHP_VERSION=55
|
||||
# PHP_VERSION=56
|
||||
PHP_VERSION=70
|
||||
# PHP_VERSION=71
|
||||
|
||||
### PHP interpreter
|
||||
# PHP_INTERPRETER=hhvm
|
||||
@ -26,6 +26,7 @@ WORKSPACE_INSTALL_LARAVEL_ENVOY=false
|
||||
WORKSPACE_INSTALL_DEPLOYER=false
|
||||
WORKSPACE_INSTALL_LINUXBREW=false
|
||||
WORKSPACE_INSTALL_MC=false
|
||||
WORKSPACE_INSTALL_SYMFONY=false
|
||||
WORKSPACE_PUID=1000
|
||||
WORKSPACE_PGID=1000
|
||||
WORKSPACE_NODE_VERSION=stable
|
||||
@ -34,7 +35,6 @@ WORKSPACE_TIMEZONE=UTC
|
||||
WORKSPACE_SSH_PORT=2222
|
||||
|
||||
### PHP_FPM Container
|
||||
PHP_FPM_DOCKER_FILE=Dockerfile-70
|
||||
PHP_FPM_INSTALL_XDEBUG=false
|
||||
PHP_FPM_INSTALL_MONGO=false
|
||||
PHP_FPM_INSTALL_SOAP=false
|
||||
@ -48,6 +48,7 @@ PHP_FPM_INSTALL_AEROSPIKE_EXTENSION=false
|
||||
PHP_FPM_INSTALL_MYSQLI=false
|
||||
PHP_FPM_INSTALL_TOKENIZER=false
|
||||
PHP_FPM_INSTALL_INTL=false
|
||||
PHP_FPM_INSTALL_GHOSTSCRIPT=false
|
||||
|
||||
### NGINX Container
|
||||
NGINX_HOST_HTTP_PORT=80
|
||||
@ -69,12 +70,17 @@ MYSQL_PASSWORD=secret
|
||||
MYSQL_PORT=3306
|
||||
MYSQL_ROOT_PASSWORD=root
|
||||
|
||||
### MSSQL Container
|
||||
MSSQL_DATABASE=homestead
|
||||
MSSQL_PASSWORD=yourStrong(!)Password
|
||||
MSSQL_PORT=1433
|
||||
|
||||
### MARIADB Container
|
||||
MARIADB_DATABASE=default
|
||||
MARIADB_USER=default
|
||||
MARIADB_PASSWORD=secret
|
||||
MARIADB_PORT=3306
|
||||
|
||||
MARIADB_ROOT_PASSWORD=root
|
||||
|
||||
### POSTGRES Container
|
||||
POSTGRES_DB=default
|
||||
@ -118,6 +124,33 @@ PMA_PASSWORD=secret
|
||||
PMA_ROOT_PASSWORD=secret
|
||||
PMA_PORT=88
|
||||
|
||||
### ADMINER Container
|
||||
ADM_PORT=88
|
||||
|
||||
### VARNISH Container
|
||||
VARNISH_CONFIG=/etc/varnish/default.vcl
|
||||
VARNISH_PORT=8080
|
||||
VARNISH_BACKEND_PORT=8888
|
||||
VARNISHD_PARAMS=-p default_ttl=3600 -p default_grace=3600
|
||||
|
||||
### Varnish Proxy 1 Container
|
||||
VARNISH_PROXY1_CACHE_SIZE=128m
|
||||
VARNISH_PROXY1_BACKEND_HOST=workspace
|
||||
VARNISH_PROXY1_SERVER=SERVER1
|
||||
|
||||
### Varnish Proxy 2 Container
|
||||
VARNISH_PROXY2_CACHE_SIZE=128m
|
||||
VARNISH_PROXY2_BACKEND_HOST=workspace
|
||||
VARNISH_PROXY2_SERVER=SERVER2
|
||||
|
||||
### HAPROXY Container
|
||||
HAPROXY_HOST_HTTP_PORT=8085
|
||||
|
||||
### JENKINS Container
|
||||
JENKINS_HOST_HTTP_PORT=8090
|
||||
JENKINS_HOST_SLAVE_AGENT_PORT=50000
|
||||
JENKINS_HOME=./jenkins/jenkins_home
|
||||
|
||||
### MISC
|
||||
# Replace with your Docker Host IP (will be appended to /etc/hosts)
|
||||
DOCKER_HOST_IP=10.0.75.1
|
||||
|
5
haproxy/Dockerfile
Normal file
@ -0,0 +1,5 @@
|
||||
FROM dockercloud/haproxy:latest
|
||||
|
||||
MAINTAINER ZeroC0D3 Team<zeroc0d3.team@gmail.com>
|
||||
|
||||
EXPOSE 80
|
@ -1,5 +1,7 @@
|
||||
FROM ubuntu:14.04
|
||||
|
||||
MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>
|
||||
|
||||
RUN apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0x5a16e7281be7a449
|
||||
|
||||
RUN apt-get update -y \
|
||||
|
23
jenkins/.github/ISSUE_TEMPLATE.md
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
# Issues and Contributing
|
||||
|
||||
Please note that only issues related to this Docker image will be addressed here.
|
||||
|
||||
* If you have Docker related issues, please ask in the [Docker user mailing list](https://groups.google.com/forum/#!forum/docker-user).
|
||||
* If you have Jenkins related issues, please ask in the [Jenkins mailing lists](https://jenkins-ci.org/content/mailing-lists).
|
||||
* If you are not sure, then this is probably not the place to create an issue and you should use any of the previously mentioned mailing lists.
|
||||
|
||||
If after going through the previous checklist you still think you should create an issue here please provide:
|
||||
|
||||
|
||||
### Docker commands that you execute
|
||||
|
||||
### Actual result
|
||||
|
||||
### Expected outcome
|
||||
|
||||
### Have you tried a non-dockerized Jenkins and get the expected outcome?
|
||||
|
||||
### Output of `docker version`
|
||||
|
||||
### Other relevant information
|
||||
|
6
jenkins/.gitmodules
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
[submodule "tests/test_helper/bats-support"]
|
||||
path = tests/test_helper/bats-support
|
||||
url = https://github.com/ztombol/bats-support
|
||||
[submodule "tests/test_helper/bats-assert"]
|
||||
path = tests/test_helper/bats-assert
|
||||
url = https://github.com/ztombol/bats-assert
|
16
jenkins/CONTRIBUTING.md
Normal file
@ -0,0 +1,16 @@
|
||||
# Issues and Contributing
|
||||
|
||||
Please note that only issues related to this Docker image will be addressed here.
|
||||
|
||||
* If you have Docker related issues, please ask in the [Docker user mailing list](https://groups.google.com/forum/#!forum/docker-user).
|
||||
* If you have Jenkins related issues, please ask in the [Jenkins mailing lists](https://jenkins-ci.org/content/mailing-lists).
|
||||
* If you are not sure, then this is probably not the place to create an issue and you should use any of the previously mentioned mailing lists.
|
||||
|
||||
If after going through the previous checklist you still think you should create an issue here please provide:
|
||||
|
||||
* Docker commands that you execute
|
||||
* Actual result
|
||||
* Expected outcome
|
||||
* Have you tried a non-dockerized Jenkins and get the expected outcome?
|
||||
* Output of `docker version`
|
||||
* Other relevant information
|
108
jenkins/Dockerfile
Normal file
@ -0,0 +1,108 @@
|
||||
FROM openjdk:8-jdk
|
||||
|
||||
RUN apt-get update && apt-get install -y git curl && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV JENKINS_HOME /var/jenkins_home
|
||||
ENV JENKINS_SLAVE_AGENT_PORT 50000
|
||||
|
||||
ARG user=jenkins
|
||||
ARG group=jenkins
|
||||
ARG uid=1000
|
||||
ARG gid=1000
|
||||
|
||||
# Jenkins is run with user `jenkins`, uid = 1000
|
||||
# If you bind mount a volume from the host or a data container,
|
||||
# ensure you use the same uid
|
||||
RUN groupadd -g ${gid} ${group} \
|
||||
&& useradd -d "$JENKINS_HOME" -u ${uid} -g ${gid} -m -s /bin/bash ${user}
|
||||
|
||||
# Jenkins home directory is a volume, so configuration and build history
|
||||
# can be persisted and survive image upgrades
|
||||
VOLUME /var/jenkins_home
|
||||
|
||||
# `/usr/share/jenkins/ref/` contains all reference configuration we want
|
||||
# to set on a fresh new installation. Use it to bundle additional plugins
|
||||
# or config file with your custom jenkins Docker image.
|
||||
RUN mkdir -p /usr/share/jenkins/ref/init.groovy.d
|
||||
|
||||
ENV TINI_VERSION 0.13.2
|
||||
ENV TINI_SHA afbf8de8a63ce8e4f18cb3f34dfdbbd354af68a1
|
||||
|
||||
# Use tini as subreaper in Docker container to adopt zombie processes
|
||||
RUN curl -fsSL https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini-static-amd64 -o /bin/tini && chmod +x /bin/tini \
|
||||
&& echo "$TINI_SHA /bin/tini" | sha1sum -c -
|
||||
|
||||
COPY init.groovy /usr/share/jenkins/ref/init.groovy.d/tcp-slave-agent-port.groovy
|
||||
|
||||
# jenkins version being bundled in this docker image
|
||||
ARG JENKINS_VERSION
|
||||
ENV JENKINS_VERSION ${JENKINS_VERSION:-2.32.3}
|
||||
|
||||
# jenkins.war checksum, download will be validated using it
|
||||
ARG JENKINS_SHA=a25b9a314ca9e76f9673da7309e1882e32674223
|
||||
|
||||
# Can be used to customize where jenkins.war get downloaded from
|
||||
ARG JENKINS_URL=https://repo.jenkins-ci.org/public/org/jenkins-ci/main/jenkins-war/${JENKINS_VERSION}/jenkins-war-${JENKINS_VERSION}.war
|
||||
|
||||
# could use ADD but this one does not check Last-Modified header neither does it allow to control checksum
|
||||
# see https://github.com/docker/docker/issues/8331
|
||||
RUN curl -fsSL ${JENKINS_URL} -o /usr/share/jenkins/jenkins.war \
|
||||
&& echo "${JENKINS_SHA} /usr/share/jenkins/jenkins.war" | sha1sum -c -
|
||||
|
||||
ENV JENKINS_UC https://updates.jenkins.io
|
||||
RUN chown -R ${user} "$JENKINS_HOME" /usr/share/jenkins/ref
|
||||
|
||||
|
||||
# Add jenkins to the correct group
|
||||
# see http://stackoverflow.com/questions/42164653/docker-in-docker-permissions-error
|
||||
# use "getent group docker | awk -F: '{printf "%d\n", $3}'" command on host to find correct value for gid or simply use 'id'
|
||||
ARG DOCKER_GID=998
|
||||
|
||||
RUN groupadd -g ${DOCKER_GID} docker \
|
||||
&& curl -sSL https://get.docker.com/ | sh \
|
||||
&& apt-get -q autoremove \
|
||||
&& apt-get -q clean -y \
|
||||
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/*.bin
|
||||
|
||||
# Install Docker-in-Docker from git@github.com:jpetazzo/dind.git
|
||||
# RUN apt-get update -qq && apt-get install -qqy apt-transport-https ca-certificates curl lxc iptables
|
||||
# Install Docker from Docker Inc. repositories.
|
||||
RUN apt-get install -y curl && curl -sSL https://get.docker.com/ | sh
|
||||
RUN usermod -aG docker jenkins
|
||||
|
||||
# Install Docker-Compose
|
||||
RUN curl -L "https://github.com/docker/compose/releases/download/1.10.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
|
||||
RUN chmod +x /usr/local/bin/docker-compose
|
||||
|
||||
|
||||
# for main web interface:
|
||||
EXPOSE 8080
|
||||
|
||||
# will be used by attached slave agents:
|
||||
EXPOSE 50000
|
||||
|
||||
ENV COPY_REFERENCE_FILE_LOG $JENKINS_HOME/copy_reference_file.log
|
||||
|
||||
USER ${user}
|
||||
|
||||
COPY jenkins-support /usr/local/bin/jenkins-support
|
||||
COPY jenkins.sh /usr/local/bin/jenkins.sh
|
||||
ENTRYPOINT ["/bin/tini", "--", "/usr/local/bin/jenkins.sh"]
|
||||
|
||||
# from a derived Dockerfile, can use `RUN plugins.sh active.txt` to setup /usr/share/jenkins/ref/plugins from a support bundle
|
||||
COPY plugins.sh /usr/local/bin/plugins.sh
|
||||
COPY install-plugins.sh /usr/local/bin/install-plugins.sh
|
||||
|
||||
# Only need below if we are starting from empty jenkins_home
|
||||
## Copy the RSA keys
|
||||
#RUN mkdir -p /var/jenkins_home/.ssh
|
||||
#RUN chown jenkins:jenkins /var/jenkins_home/.ssh
|
||||
#COPY keys/id_rsa /var/jenkins_home/.ssh/id_rsa.pub
|
||||
#COPY keys/id_rsa /var/jenkins_home/.ssh/id_rsa
|
||||
#COPY keys/known_hosts /var/jenkins_home/.ssh/known_hosts
|
||||
#
|
||||
#USER root
|
||||
#RUN chmod 600 /var/jenkins_home/.ssh/id_rsa
|
||||
#RUN chmod 644 /var/jenkins_home/.ssh/id_rsa.pub
|
||||
## ssh-keyscan -H github.com >> ~/.ssh/known_hosts
|
||||
## ssh-keyscan -H bitbucket.org >> ~/.ssh/known_hosts
|
38
jenkins/Jenkinsfile
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
#!/usr/bin/env groovy
|
||||
|
||||
properties([
|
||||
buildDiscarder(logRotator(numToKeepStr: '5', artifactNumToKeepStr: '5')),
|
||||
pipelineTriggers([cron('@daily')]),
|
||||
])
|
||||
|
||||
node('docker') {
|
||||
deleteDir()
|
||||
|
||||
stage('Checkout') {
|
||||
checkout scm
|
||||
}
|
||||
|
||||
if (!infra.isTrusted()) {
|
||||
/* Outside of the trusted.ci environment, we're building and testing
|
||||
* the Dockerfile in this repository, but not publishing to docker hub
|
||||
*/
|
||||
stage('Build') {
|
||||
docker.build('jenkins')
|
||||
}
|
||||
|
||||
stage('Test') {
|
||||
sh """
|
||||
git submodule update --init --recursive
|
||||
git clone https://github.com/sstephenson/bats.git
|
||||
bats/bin/bats tests
|
||||
"""
|
||||
}
|
||||
} else {
|
||||
/* In our trusted.ci environment we only want to be publishing our
|
||||
* containers from artifacts
|
||||
*/
|
||||
stage('Publish') {
|
||||
sh './publish.sh'
|
||||
}
|
||||
}
|
||||
}
|
226
jenkins/README.md
Normal file
@ -0,0 +1,226 @@
|
||||
# Official Jenkins Docker image
|
||||
|
||||
The Jenkins Continuous Integration and Delivery server.
|
||||
|
||||
This is a fully functional Jenkins server, based on the Long Term Support release.
|
||||
[http://jenkins.io/](http://jenkins.io/).
|
||||
|
||||
For weekly releases check out [`jenkinsci/jenkins`](https://hub.docker.com/r/jenkinsci/jenkins/)
|
||||
|
||||
|
||||
<img src="http://jenkins-ci.org/sites/default/files/jenkins_logo.png"/>
|
||||
|
||||
|
||||
# Usage
|
||||
|
||||
```
|
||||
docker run -p 8080:8080 -p 50000:50000 jenkins
|
||||
```
|
||||
|
||||
NOTE: read the _build executors_ section below for the role of the `50000` port mapping.
|
||||
|
||||
This will store the workspace in /var/jenkins_home. All Jenkins data lives in there - including plugins and configuration.
|
||||
You will probably want to make that an explicit volume so you can manage it and attach to another container for upgrades :
|
||||
|
||||
```
|
||||
docker run -p 8080:8080 -p 50000:50000 -v jenkins_home:/var/jenkins_home jenkins
|
||||
```
|
||||
|
||||
This will automatically create a 'jenkins_home' volume on the Docker host that will survive container stop/restart/deletion.
|
||||
|
||||
Avoid using a bind mount from a folder on the host into `/var/jenkins_home`, as this might result in file permission issues. If you _really_ need to bind mount jenkins_home, ensure that the directory on the host is accessible by the jenkins user inside the container (jenkins user - uid 1000) or use the `-u some_other_user` parameter with `docker run`.
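If you do go the bind-mount route anyway, a minimal sketch (the host path is only an example; uid 1000 matches the jenkins user mentioned above):

```
# give the host directory to uid 1000, the jenkins user inside the container
mkdir -p "$HOME/jenkins_home"
sudo chown -R 1000 "$HOME/jenkins_home"
docker run -p 8080:8080 -p 50000:50000 -v "$HOME/jenkins_home":/var/jenkins_home jenkins
```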
|
||||
|
||||
## Backing up data
|
||||
|
||||
If you bind mount in a volume - you can simply back up that directory
|
||||
(which is jenkins_home) at any time.
|
||||
|
||||
This is highly recommended. Treat the jenkins_home directory as you would a database - in Docker you would generally put a database on a volume.
|
||||
|
||||
If your volume is inside a container - you can use ```docker cp $ID:/var/jenkins_home``` command to extract the data, or other options to find where the volume data is.
|
||||
Note that some symlinks on some OSes may be converted to copies (this can confuse jenkins with lastStableBuild links etc)
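For instance (a sketch; `myjenkins` stands in for your container's name or id, and the target directory is arbitrary):

```
docker cp myjenkins:/var/jenkins_home ./jenkins_home_backup
```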
|
||||
|
||||
For more info check Docker docs section on [Managing data in containers](https://docs.docker.com/engine/tutorials/dockervolumes/)
|
||||
|
||||
# Setting the number of executors
|
||||
|
||||
You can specify and set the number of executors of your Jenkins master instance using a groovy script. By default it's set to 2 executors, but you can extend the image and change it to your desired number of executors:
|
||||
|
||||
`executors.groovy`
|
||||
```
|
||||
import jenkins.model.*
|
||||
Jenkins.instance.setNumExecutors(5)
|
||||
```
|
||||
|
||||
and `Dockerfile`
|
||||
|
||||
```
|
||||
FROM jenkins
|
||||
COPY executors.groovy /usr/share/jenkins/ref/init.groovy.d/executors.groovy
|
||||
```
|
||||
|
||||
|
||||
# Attaching build executors
|
||||
|
||||
You can run builds on the master out of the box.
|
||||
|
||||
But if you want to attach build slave servers **through JNLP (Java Web Start)**: make sure you map the port: ```-p 50000:50000``` - which will be used when you connect a slave agent.
|
||||
|
||||
If you are only using [SSH slaves](https://wiki.jenkins-ci.org/display/JENKINS/SSH+Slaves+plugin), then you do **NOT** need to put that port mapping.
|
||||
|
||||
# Passing JVM parameters
|
||||
|
||||
You might need to customize the JVM running Jenkins, typically to pass system properties or tweak heap memory settings. Use JAVA_OPTS environment
|
||||
variable for this purpose :
|
||||
|
||||
```
|
||||
docker run --name myjenkins -p 8080:8080 -p 50000:50000 --env JAVA_OPTS=-Dhudson.footerURL=http://mycompany.com jenkins
|
||||
```
|
||||
|
||||
# Configuring logging
|
||||
|
||||
Jenkins logging can be configured through a properties file and `java.util.logging.config.file` Java property.
|
||||
For example:
|
||||
|
||||
```
|
||||
mkdir data
|
||||
cat > data/log.properties <<EOF
|
||||
handlers=java.util.logging.ConsoleHandler
|
||||
jenkins.level=FINEST
|
||||
java.util.logging.ConsoleHandler.level=FINEST
|
||||
EOF
|
||||
docker run --name myjenkins -p 8080:8080 -p 50000:50000 --env JAVA_OPTS="-Djava.util.logging.config.file=/var/jenkins_home/log.properties" -v `pwd`/data:/var/jenkins_home jenkins
|
||||
```
|
||||
|
||||
# Configuring reverse proxy
|
||||
If you want to install Jenkins behind a reverse proxy with a prefix, for example mysite.com/jenkins, you need to add the environment variable `JENKINS_OPTS="--prefix=/jenkins"` and then follow the procedures below to configure your reverse proxy, which will depend on whether you have Apache or Nginx (see the example after the links):
|
||||
- [Apache](https://wiki.jenkins-ci.org/display/JENKINS/Running+Jenkins+behind+Apache)
|
||||
- [Nginx](https://wiki.jenkins-ci.org/display/JENKINS/Jenkins+behind+an+NGinX+reverse+proxy)
|
||||
|
||||
# Passing Jenkins launcher parameters
|
||||
|
||||
Arguments you pass to docker when running the jenkins image are passed to the jenkins launcher, so you can run, for example:
|
||||
```
|
||||
docker run jenkins --version
|
||||
```
|
||||
This will dump Jenkins version, just like when you run jenkins as an executable war.
|
||||
|
||||
You can also define jenkins arguments via `JENKINS_OPTS`. This is useful for defining a set of arguments to pass to the jenkins launcher when you
|
||||
define a derived jenkins image based on the official one with some customized settings. The following sample Dockerfile uses this option
|
||||
to force use of HTTPS with a certificate included in the image
|
||||
|
||||
```
|
||||
FROM jenkins:1.565.3
|
||||
|
||||
COPY https.pem /var/lib/jenkins/cert
|
||||
COPY https.key /var/lib/jenkins/pk
|
||||
ENV JENKINS_OPTS --httpPort=-1 --httpsPort=8083 --httpsCertificate=/var/lib/jenkins/cert --httpsPrivateKey=/var/lib/jenkins/pk
|
||||
EXPOSE 8083
|
||||
```
|
||||
|
||||
You can also change the default slave agent port for jenkins by defining `JENKINS_SLAVE_AGENT_PORT` in a sample Dockerfile.
|
||||
|
||||
```
|
||||
FROM jenkins:1.565.3
|
||||
ENV JENKINS_SLAVE_AGENT_PORT 50001
|
||||
```
|
||||
or as a parameter to docker,
|
||||
```
|
||||
docker run --name myjenkins -p 8080:8080 -p 50001:50001 --env JENKINS_SLAVE_AGENT_PORT=50001 jenkins
|
||||
```
|
||||
|
||||
# Installing more tools
|
||||
|
||||
You can run your container as root - and install via apt-get, install as part of build steps via jenkins tool installers, or you can create your own Dockerfile to customise, for example:
|
||||
|
||||
```
|
||||
FROM jenkins
|
||||
# if we want to install via apt
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y ruby make more-thing-here
|
||||
# drop back to the regular jenkins user - good practice
|
||||
USER jenkins
|
||||
```
|
||||
|
||||
In such a derived image, you can customize your jenkins instance with hook scripts or additional plugins.
|
||||
For this purpose, use `/usr/share/jenkins/ref` as a place to define the default JENKINS_HOME content you
|
||||
wish the target installation to look like :
|
||||
|
||||
```
|
||||
FROM jenkins
|
||||
COPY custom.groovy /usr/share/jenkins/ref/init.groovy.d/custom.groovy
|
||||
```
|
||||
|
||||
## Preinstalling plugins
|
||||
|
||||
You can rely on the `install-plugins.sh` script to pass a set of plugins to download with their dependencies.
|
||||
Use the plugin artifact ID, without the `-plugin` extension, and append the version if needed, separated by `:`.
|
||||
Dependencies that are already included in the Jenkins war will only be downloaded if their required version is newer than the one included.
|
||||
|
||||
```
|
||||
FROM jenkins
|
||||
RUN /usr/local/bin/install-plugins.sh docker-slaves github-branch-source:1.8
|
||||
```
|
||||
|
||||
When the jenkins container starts, it will check that `JENKINS_HOME` has this reference content, and copy it
|
||||
there if required. It will not override such files, so if you upgraded some plugins from UI they won't
|
||||
be reverted on next start.
|
||||
|
||||
In case you *do* want to override, append '.override' to the name of the reference file. E.g. a file named
|
||||
`/usr/share/jenkins/ref/config.xml.override` will overwrite an existing `config.xml` file in JENKINS_HOME.
|
||||
|
||||
Also see [JENKINS-24986](https://issues.jenkins-ci.org/browse/JENKINS-24986)
|
||||
|
||||
|
||||
Here is an example to get the list of plugins from an existing server:
|
||||
|
||||
```
|
||||
JENKINS_HOST=username:password@myhost.com:port
|
||||
curl -sSL "http://$JENKINS_HOST/pluginManager/api/xml?depth=1&xpath=/*/*/shortName|/*/*/version&wrapper=plugins" | perl -pe 's/.*?<shortName>([\w-]+).*?<version>([^<]+)()(<\/\w+>)+/\1 \2\n/g'|sed 's/ /:/'
|
||||
```
|
||||
|
||||
Example Output:
|
||||
|
||||
```
|
||||
cucumber-testresult-plugin:0.8.2
|
||||
pam-auth:1.1
|
||||
matrix-project:1.4.1
|
||||
script-security:1.13
|
||||
...
|
||||
```
|
||||
|
||||
For 2.x-derived images, you may also want to
|
||||
|
||||
RUN echo 2.0 > /usr/share/jenkins/ref/jenkins.install.UpgradeWizard.state
|
||||
|
||||
to indicate that this Jenkins installation is fully configured.
|
||||
Otherwise a banner will appear prompting the user to install additional plugins,
|
||||
which may be inappropriate.
|
||||
|
||||
# Upgrading
|
||||
|
||||
All the data needed is in the /var/jenkins_home directory, so how you upgrade depends on how you manage that. Generally, you can copy it out, then "docker pull" the image again, and you will have the latest LTS; you can then start up with -v pointing to that data (/var/jenkins_home) and everything will be as you left it.
|
||||
|
||||
As always - please ensure that you know how to drive docker - especially volume handling!
|
||||
|
||||
## Upgrading plugins
|
||||
|
||||
By default, plugins will be upgraded if they haven't been upgraded manually and if the version from the docker image is newer than the version in the container. Versions installed by the docker image are tracked through a marker file.
|
||||
|
||||
The default behaviour when upgrading from a docker image that didn't write marker files is to leave existing plugins in place. If you want to upgrade existing plugins without marker you may run the docker image with `-e TRY_UPGRADE_IF_NO_MARKER=true`. Then plugins will be upgraded if the version provided by the docker image is newer.
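For example (a sketch; the volume name and port mappings are only illustrative):

```
docker run -p 8080:8080 -p 50000:50000 -e TRY_UPGRADE_IF_NO_MARKER=true -v jenkins_home:/var/jenkins_home jenkins
```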
|
||||
|
||||
# Building
|
||||
|
||||
Build with the usual
|
||||
|
||||
docker build -t jenkins .
|
||||
|
||||
Tests are written using [bats](https://github.com/sstephenson/bats) under the `tests` dir
|
||||
|
||||
bats tests
|
||||
|
||||
Bats can be easily installed with `brew install bats` on OS X
|
||||
|
||||
# Questions?
|
||||
|
||||
Jump on irc.freenode.net and the #jenkins room. Ask!
|
14
jenkins/docker-compose.yml
Normal file
@ -0,0 +1,14 @@
|
||||
master:
|
||||
build: .
|
||||
environment:
|
||||
JAVA_OPTS: "-Djava.awt.headless=true"
|
||||
ports:
|
||||
- "50000:50000"
|
||||
# Expose Jenkins to parent on port 8090
|
||||
- "8090:8080"
|
||||
# Allow Docker In Docker
|
||||
privileged: true
|
||||
volumes:
|
||||
- ./jenkins_home:/var/jenkins_home
|
||||
# Allow Docker In Docker to use parent docker container
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
12
jenkins/init.groovy
Normal file
@ -0,0 +1,12 @@
|
||||
import hudson.model.*;
|
||||
import jenkins.model.*;
|
||||
|
||||
|
||||
Thread.start {
|
||||
sleep 10000
|
||||
println "--> setting agent port for jnlp"
|
||||
def env = System.getenv()
|
||||
int port = env['JENKINS_SLAVE_AGENT_PORT'].toInteger()
|
||||
Jenkins.instance.setSlaveAgentPort(port)
|
||||
println "--> setting agent port for jnlp... done"
|
||||
}
|
205
jenkins/install-plugins.sh
Normal file
@ -0,0 +1,205 @@
|
||||
#!/bin/bash -eu
|
||||
|
||||
# Resolve dependencies and download plugins given on the command line
|
||||
#
|
||||
# FROM jenkins
|
||||
# RUN install-plugins.sh docker-slaves github-branch-source
|
||||
|
||||
set -o pipefail
|
||||
|
||||
REF_DIR=${REF:-/usr/share/jenkins/ref/plugins}
|
||||
FAILED="$REF_DIR/failed-plugins.txt"
|
||||
|
||||
. /usr/local/bin/jenkins-support
|
||||
|
||||
getLockFile() {
|
||||
printf '%s' "$REF_DIR/${1}.lock"
|
||||
}
|
||||
|
||||
getArchiveFilename() {
|
||||
printf '%s' "$REF_DIR/${1}.jpi"
|
||||
}
|
||||
|
||||
download() {
|
||||
local plugin originalPlugin version lock ignoreLockFile
|
||||
plugin="$1"
|
||||
version="${2:-latest}"
|
||||
ignoreLockFile="${3:-}"
|
||||
lock="$(getLockFile "$plugin")"
|
||||
|
||||
if [[ $ignoreLockFile ]] || mkdir "$lock" &>/dev/null; then
|
||||
if ! doDownload "$plugin" "$version"; then
|
||||
# some plugin don't follow the rules about artifact ID
|
||||
# typically: docker-plugin
|
||||
originalPlugin="$plugin"
|
||||
plugin="${plugin}-plugin"
|
||||
if ! doDownload "$plugin" "$version"; then
|
||||
echo "Failed to download plugin: $originalPlugin or $plugin" >&2
|
||||
echo "Not downloaded: ${originalPlugin}" >> "$FAILED"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! checkIntegrity "$plugin"; then
|
||||
echo "Downloaded file is not a valid ZIP: $(getArchiveFilename "$plugin")" >&2
|
||||
echo "Download integrity: ${plugin}" >> "$FAILED"
|
||||
return 1
|
||||
fi
|
||||
|
||||
resolveDependencies "$plugin"
|
||||
fi
|
||||
}
|
||||
|
||||
doDownload() {
|
||||
local plugin version url jpi
|
||||
plugin="$1"
|
||||
version="$2"
|
||||
jpi="$(getArchiveFilename "$plugin")"
|
||||
|
||||
# If plugin already exists and is the same version do not download
|
||||
if test -f "$jpi" && unzip -p "$jpi" META-INF/MANIFEST.MF | tr -d '\r' | grep "^Plugin-Version: ${version}$" > /dev/null; then
|
||||
echo "Using provided plugin: $plugin"
|
||||
return 0
|
||||
fi
|
||||
|
||||
JENKINS_UC_DOWNLOAD=${JENKINS_UC_DOWNLOAD:-"$JENKINS_UC/download"}
|
||||
|
||||
url="$JENKINS_UC_DOWNLOAD/plugins/$plugin/$version/${plugin}.hpi"
|
||||
|
||||
echo "Downloading plugin: $plugin from $url"
|
||||
curl --connect-timeout ${CURL_CONNECTION_TIMEOUT:-20} --retry ${CURL_RETRY:-5} --retry-delay ${CURL_RETRY_DELAY:-0} --retry-max-time ${CURL_RETRY_MAX_TIME:-60} -s -f -L "$url" -o "$jpi"
|
||||
return $?
|
||||
}
|
||||
|
||||
checkIntegrity() {
|
||||
local plugin jpi
|
||||
plugin="$1"
|
||||
jpi="$(getArchiveFilename "$plugin")"
|
||||
|
||||
unzip -t -qq "$jpi" >/dev/null
|
||||
return $?
|
||||
}
|
||||
|
||||
resolveDependencies() {
|
||||
local plugin jpi dependencies
|
||||
plugin="$1"
|
||||
jpi="$(getArchiveFilename "$plugin")"
|
||||
|
||||
dependencies="$(unzip -p "$jpi" META-INF/MANIFEST.MF | tr -d '\r' | tr '\n' '|' | sed -e 's#| ##g' | tr '|' '\n' | grep "^Plugin-Dependencies: " | sed -e 's#^Plugin-Dependencies: ##')"
|
||||
|
||||
if [[ ! $dependencies ]]; then
|
||||
echo " > $plugin has no dependencies"
|
||||
return
|
||||
fi
|
||||
|
||||
echo " > $plugin depends on $dependencies"
|
||||
|
||||
IFS=',' read -r -a array <<< "$dependencies"
|
||||
|
||||
for d in "${array[@]}"
|
||||
do
|
||||
plugin="$(cut -d':' -f1 - <<< "$d")"
|
||||
if [[ $d == *"resolution:=optional"* ]]; then
|
||||
echo "Skipping optional dependency $plugin"
|
||||
else
|
||||
local pluginInstalled
|
||||
if pluginInstalled="$(echo "${bundledPlugins}" | grep "^${plugin}:")"; then
|
||||
pluginInstalled="${pluginInstalled//[$'\r']}"
|
||||
local versionInstalled; versionInstalled=$(versionFromPlugin "${pluginInstalled}")
|
||||
local minVersion; minVersion=$(versionFromPlugin "${d}")
|
||||
if versionLT "${versionInstalled}" "${minVersion}"; then
|
||||
echo "Upgrading bundled dependency $d ($minVersion > $versionInstalled)"
|
||||
download "$plugin" &
|
||||
else
|
||||
echo "Skipping already bundled dependency $d ($minVersion <= $versionInstalled)"
|
||||
fi
|
||||
else
|
||||
download "$plugin" &
|
||||
fi
|
||||
fi
|
||||
done
|
||||
wait
|
||||
}
|
||||
|
||||
bundledPlugins() {
|
||||
local JENKINS_WAR=/usr/share/jenkins/jenkins.war
|
||||
if [ -f $JENKINS_WAR ]
|
||||
then
|
||||
TEMP_PLUGIN_DIR=/tmp/plugintemp.$$
|
||||
for i in $(jar tf $JENKINS_WAR | egrep '[^detached-]plugins.*\..pi' | sort)
|
||||
do
|
||||
rm -fr $TEMP_PLUGIN_DIR
|
||||
mkdir -p $TEMP_PLUGIN_DIR
|
||||
PLUGIN=$(basename "$i"|cut -f1 -d'.')
|
||||
(cd $TEMP_PLUGIN_DIR;jar xf "$JENKINS_WAR" "$i";jar xvf "$TEMP_PLUGIN_DIR/$i" META-INF/MANIFEST.MF >/dev/null 2>&1)
|
||||
VER=$(egrep -i Plugin-Version "$TEMP_PLUGIN_DIR/META-INF/MANIFEST.MF"|cut -d: -f2|sed 's/ //')
|
||||
echo "$PLUGIN:$VER"
|
||||
done
|
||||
rm -fr $TEMP_PLUGIN_DIR
|
||||
else
|
||||
rm -f "$TEMP_ALREADY_INSTALLED"
|
||||
echo "ERROR file not found: $JENKINS_WAR"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
versionFromPlugin() {
|
||||
local plugin=$1
|
||||
if [[ $plugin =~ .*:.* ]]; then
|
||||
echo "${plugin##*:}"
|
||||
else
|
||||
echo "latest"
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
installedPlugins() {
|
||||
for f in "$REF_DIR"/*.jpi; do
|
||||
echo "$(basename "$f" | sed -e 's/\.jpi//'):$(get_plugin_version "$f")"
|
||||
done
|
||||
}
|
||||
|
||||
main() {
|
||||
local plugin version
|
||||
|
||||
mkdir -p "$REF_DIR" || exit 1
|
||||
|
||||
# Create lockfile manually before first run to make sure any explicit version set is used.
|
||||
echo "Creating initial locks..."
|
||||
for plugin in "$@"; do
|
||||
mkdir "$(getLockFile "${plugin%%:*}")"
|
||||
done
|
||||
|
||||
echo "Analyzing war..."
|
||||
bundledPlugins="$(bundledPlugins)"
|
||||
|
||||
echo "Downloading plugins..."
|
||||
for plugin in "$@"; do
|
||||
version=""
|
||||
|
||||
if [[ $plugin =~ .*:.* ]]; then
|
||||
version=$(versionFromPlugin "${plugin}")
|
||||
plugin="${plugin%%:*}"
|
||||
fi
|
||||
|
||||
download "$plugin" "$version" "true" &
|
||||
done
|
||||
wait
|
||||
|
||||
echo
|
||||
echo "WAR bundled plugins:"
|
||||
echo "${bundledPlugins}"
|
||||
echo
|
||||
echo "Installed plugins:"
|
||||
installedPlugins
|
||||
|
||||
if [[ -f $FAILED ]]; then
|
||||
echo "Some plugins failed to download!" "$(<"$FAILED")" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Cleaning up locks"
|
||||
rm -r "$REF_DIR"/*.lock
|
||||
}
|
||||
|
||||
main "$@"
|
127
jenkins/jenkins-support
Normal file
@ -0,0 +1,127 @@
|
||||
#!/bin/bash -eu
|
||||
|
||||
# compare if version1 < version2
|
||||
versionLT() {
|
||||
local v1; v1=$(echo "$1" | cut -d '-' -f 1 )
|
||||
local q1; q1=$(echo "$1" | cut -s -d '-' -f 2- )
|
||||
local v2; v2=$(echo "$2" | cut -d '-' -f 1 )
|
||||
local q2; q2=$(echo "$2" | cut -s -d '-' -f 2- )
|
||||
if [ "$v1" = "$v2" ]; then
|
||||
if [ "$q1" = "$q2" ]; then
|
||||
return 1
|
||||
else
|
||||
if [ -z "$q1" ]; then
|
||||
return 1
|
||||
else
|
||||
if [ -z "$q2" ]; then
|
||||
return 0
|
||||
else
|
||||
[ "$q1" = "$(echo -e "$q1\n$q2" | sort -V | head -n1)" ]
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
else
|
||||
[ "$v1" = "$(echo -e "$v1\n$v2" | sort -V | head -n1)" ]
|
||||
fi
|
||||
}
|
||||
|
||||
# returns a plugin version from a plugin archive
|
||||
get_plugin_version() {
|
||||
local archive; archive=$1
|
||||
local version; version=$(unzip -p "$archive" META-INF/MANIFEST.MF | grep "^Plugin-Version: " | sed -e 's#^Plugin-Version: ##')
|
||||
version=${version%%[[:space:]]}
|
||||
echo "$version"
|
||||
}
|
||||
|
||||
# Copy files from /usr/share/jenkins/ref into $JENKINS_HOME
|
||||
# So the initial JENKINS_HOME is set with the expected content.
|
||||
# Don't override, as this is just a reference setup, and use from UI
|
||||
# can then change this, upgrade plugins, etc.
|
||||
copy_reference_file() {
|
||||
f="${1%/}"
|
||||
b="${f%.override}"
|
||||
rel="${b:23}"
|
||||
version_marker="${rel}.version_from_image"
|
||||
dir=$(dirname "${b}")
|
||||
local action;
|
||||
local reason;
|
||||
local container_version;
|
||||
local image_version;
|
||||
local marker_version;
|
||||
local log; log=false
|
||||
if [[ ${rel} == plugins/*.jpi ]]; then
|
||||
container_version=$(get_plugin_version "$JENKINS_HOME/${rel}")
|
||||
image_version=$(get_plugin_version "${f}")
|
||||
if [[ -e $JENKINS_HOME/${version_marker} ]]; then
|
||||
marker_version=$(cat "$JENKINS_HOME/${version_marker}")
|
||||
if versionLT "$marker_version" "$container_version"; then
|
||||
action="SKIPPED"
|
||||
reason="Installed version ($container_version) has been manually upgraded from initial version ($marker_version)"
|
||||
log=true
|
||||
else
|
||||
if [[ "$image_version" == "$container_version" ]]; then
|
||||
action="SKIPPED"
|
||||
reason="Version from image is the same as the installed version $image_version"
|
||||
else
|
||||
if versionLT "$image_version" "$container_version"; then
|
||||
action="SKIPPED"
|
||||
log=true
|
||||
reason="Image version ($image_version) is older than installed version ($container_version)"
|
||||
else
|
||||
action="UPGRADED"
|
||||
log=true
|
||||
reason="Image version ($image_version) is newer than installed version ($container_version)"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
else
|
||||
if [[ -n "$TRY_UPGRADE_IF_NO_MARKER" ]]; then
|
||||
if [[ "$image_version" == "$container_version" ]]; then
|
||||
action="SKIPPED"
|
||||
reason="Version from image is the same as the installed version $image_version (no marker found)"
|
||||
# Add marker for next time
|
||||
echo "$image_version" > "$JENKINS_HOME/${version_marker}"
|
||||
else
|
||||
if versionLT "$image_version" "$container_version"; then
|
||||
action="SKIPPED"
|
||||
log=true
|
||||
reason="Image version ($image_version) is older than installed version ($container_version) (no marker found)"
|
||||
else
|
||||
action="UPGRADED"
|
||||
log=true
|
||||
reason="Image version ($image_version) is newer than installed version ($container_version) (no marker found)"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
if [[ ! -e $JENKINS_HOME/${rel} || "$action" == "UPGRADED" || $f = *.override ]]; then
|
||||
action=${action:-"INSTALLED"}
|
||||
log=true
|
||||
mkdir -p "$JENKINS_HOME/${dir:23}"
|
||||
cp -r "${f}" "$JENKINS_HOME/${rel}";
|
||||
# pin plugins on initial copy
|
||||
touch "$JENKINS_HOME/${rel}.pinned"
|
||||
echo "$image_version" > "$JENKINS_HOME/${version_marker}"
|
||||
reason=${reason:-$image_version}
|
||||
else
|
||||
action=${action:-"SKIPPED"}
|
||||
fi
|
||||
else
|
||||
if [[ ! -e $JENKINS_HOME/${rel} || $f = *.override ]]
|
||||
then
|
||||
action="INSTALLED"
|
||||
log=true
|
||||
mkdir -p "$JENKINS_HOME/${dir:23}"
|
||||
cp -r "${f}" "$JENKINS_HOME/${rel}";
|
||||
else
|
||||
action="SKIPPED"
|
||||
fi
|
||||
fi
|
||||
if [[ -n "$VERBOSE" || "$log" == "true" ]]; then
|
||||
if [ -z "$reason" ]; then
|
||||
echo "$action $rel" >> "$COPY_REFERENCE_FILE_LOG"
|
||||
else
|
||||
echo "$action $rel : $reason" >> "$COPY_REFERENCE_FILE_LOG"
|
||||
fi
|
||||
fi
|
||||
}
|
26
jenkins/jenkins.sh
Normal file
@ -0,0 +1,26 @@
|
||||
#! /bin/bash -e
|
||||
|
||||
: "${JENKINS_HOME:="/var/jenkins_home"}"
|
||||
touch "${COPY_REFERENCE_FILE_LOG}" || { echo "Can not write to ${COPY_REFERENCE_FILE_LOG}. Wrong volume permissions?"; exit 1; }
|
||||
echo "--- Copying files at $(date)" >> "$COPY_REFERENCE_FILE_LOG"
|
||||
find /usr/share/jenkins/ref/ -type f -exec bash -c '. /usr/local/bin/jenkins-support; for arg; do copy_reference_file "$arg"; done' _ {} +
|
||||
|
||||
# if the first `docker run` argument starts with `--`, the user is passing jenkins launcher arguments
|
||||
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
|
||||
|
||||
# read JAVA_OPTS and JENKINS_OPTS into arrays to avoid need for eval (and associated vulnerabilities)
|
||||
java_opts_array=()
|
||||
while IFS= read -r -d '' item; do
|
||||
java_opts_array+=( "$item" )
|
||||
done < <([[ $JAVA_OPTS ]] && xargs printf '%s\0' <<<"$JAVA_OPTS")
|
||||
|
||||
jenkins_opts_array=( )
|
||||
while IFS= read -r -d '' item; do
|
||||
jenkins_opts_array+=( "$item" )
|
||||
done < <([[ $JENKINS_OPTS ]] && xargs printf '%s\0' <<<"$JENKINS_OPTS")
|
||||
|
||||
exec java "${java_opts_array[@]}" -jar /usr/share/jenkins/jenkins.war "${jenkins_opts_array[@]}" "$@"
|
||||
fi
|
||||
|
||||
# As the argument is not jenkins, assume the user wants to run their own process, for example a `bash` shell to explore this image
|
||||
exec "$@"
|
28
jenkins/jenkins_home/.gitignore
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
# File Patterns to Ignore
|
||||
logs
|
||||
*.log
|
||||
*.log.*
|
||||
*.swp
|
||||
|
||||
# Ignore SSH Config
|
||||
.ssh/*
|
||||
!.ssh/known_hosts
|
||||
|
||||
# Include job configs and ignore other data
|
||||
!config.xml
|
||||
builds
|
||||
lastStable
|
||||
lastSuccessful
|
||||
nextBuildNumber
|
||||
|
||||
# Ignore expanded plugins folders because we only want jpi files
|
||||
plugins/*
|
||||
!plugins/*.jpi
|
||||
|
||||
# Include user info
|
||||
!userContent/*
|
||||
!users/*
|
||||
|
||||
# Ignore Directories
|
||||
workspace/
|
||||
war/
|
124
jenkins/plugins.sh
Normal file
@ -0,0 +1,124 @@
|
||||
#! /bin/bash
|
||||
|
||||
# Parse a support-core plugin -style txt file as specification for jenkins plugins to be installed
|
||||
# in the reference directory, so user can define a derived Docker image with just :
|
||||
#
|
||||
# FROM jenkins
|
||||
# COPY plugins.txt /plugins.txt
|
||||
# RUN /usr/local/bin/plugins.sh /plugins.txt
|
||||
#
|
||||
# Note: Plugins already installed are skipped
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
echo "WARN: plugins.sh is deprecated, please switch to install-plugins.sh"
|
||||
|
||||
if [ -z "$1" ]
|
||||
then
|
||||
echo "
|
||||
USAGE:
|
||||
Parse a support-core plugin -style txt file as specification for jenkins plugins to be installed
|
||||
in the reference directory, so user can define a derived Docker image with just :
|
||||
|
||||
FROM jenkins
|
||||
COPY plugins.txt /plugins.txt
|
||||
RUN /usr/local/bin/plugins.sh /plugins.txt
|
||||
|
||||
Note: Plugins already installed are skipped
|
||||
|
||||
"
|
||||
exit 1
|
||||
else
|
||||
JENKINS_INPUT_JOB_LIST=$1
|
||||
if [ ! -f "$JENKINS_INPUT_JOB_LIST" ]
|
||||
then
|
||||
echo "ERROR File not found: $JENKINS_INPUT_JOB_LIST"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# the war includes a number of plugins; to make the build efficient, filter out
|
||||
# the plugins so we don't install them twice - there are about 17!
|
||||
if [ -d "$JENKINS_HOME" ]
|
||||
then
|
||||
TEMP_ALREADY_INSTALLED=$JENKINS_HOME/preinstalled.plugins.$$.txt
|
||||
else
|
||||
echo "ERROR $JENKINS_HOME not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
JENKINS_PLUGINS_DIR=/var/jenkins_home/plugins
|
||||
if [ -d "$JENKINS_PLUGINS_DIR" ]
|
||||
then
|
||||
echo "Analyzing: $JENKINS_PLUGINS_DIR"
|
||||
for i in "$JENKINS_PLUGINS_DIR"/*/; do
|
||||
JENKINS_PLUGIN=$(basename "$i")
|
||||
JENKINS_PLUGIN_VER=$(egrep -i Plugin-Version "$i/META-INF/MANIFEST.MF"|cut -d: -f2|sed 's/ //')
|
||||
echo "$JENKINS_PLUGIN:$JENKINS_PLUGIN_VER"
|
||||
done >"$TEMP_ALREADY_INSTALLED"
|
||||
else
|
||||
JENKINS_WAR=/usr/share/jenkins/jenkins.war
|
||||
if [ -f "$JENKINS_WAR" ]
|
||||
then
|
||||
echo "Analyzing war: $JENKINS_WAR"
|
||||
TEMP_PLUGIN_DIR=/tmp/plugintemp.$$
|
||||
while read -r i <&3; do
|
||||
rm -fr "$TEMP_PLUGIN_DIR"
|
||||
mkdir -p "$TEMP_PLUGIN_DIR"
|
||||
PLUGIN=$(basename "$i"|cut -f1 -d'.')
|
||||
(cd "$TEMP_PLUGIN_DIR" || exit; jar xf "$JENKINS_WAR" "$i"; jar xvf "$TEMP_PLUGIN_DIR/$i" META-INF/MANIFEST.MF >/dev/null 2>&1)
|
||||
VER=$(egrep -i Plugin-Version "$TEMP_PLUGIN_DIR/META-INF/MANIFEST.MF"|cut -d: -f2|sed 's/ //')
|
||||
echo "$PLUGIN:$VER"
|
||||
done 3< <(jar tf "$JENKINS_WAR" | egrep '[^detached-]plugins.*\..pi' | sort) > "$TEMP_ALREADY_INSTALLED"
|
||||
rm -fr "$TEMP_PLUGIN_DIR"
|
||||
else
|
||||
rm -f "$TEMP_ALREADY_INSTALLED"
|
||||
echo "ERROR file not found: $JENKINS_WAR"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
REF=/usr/share/jenkins/ref/plugins
|
||||
mkdir -p $REF
|
||||
COUNT_PLUGINS_INSTALLED=0
|
||||
while read -r spec || [ -n "$spec" ]; do
|
||||
|
||||
plugin=(${spec//:/ });
|
||||
[[ ${plugin[0]} =~ ^# ]] && continue
|
||||
[[ ${plugin[0]} =~ ^[[:space:]]*$ ]] && continue
|
||||
[[ -z ${plugin[1]} ]] && plugin[1]="latest"
|
||||
|
||||
if [ -z "$JENKINS_UC_DOWNLOAD" ]; then
|
||||
JENKINS_UC_DOWNLOAD=$JENKINS_UC/download
|
||||
fi
|
||||
|
||||
if ! grep -q "${plugin[0]}:${plugin[1]}" "$TEMP_ALREADY_INSTALLED"
|
||||
then
|
||||
echo "Downloading ${plugin[0]}:${plugin[1]}"
|
||||
curl --retry 3 --retry-delay 5 -sSL -f "${JENKINS_UC_DOWNLOAD}/plugins/${plugin[0]}/${plugin[1]}/${plugin[0]}.hpi" -o "$REF/${plugin[0]}.jpi"
|
||||
unzip -qqt "$REF/${plugin[0]}.jpi"
|
||||
(( COUNT_PLUGINS_INSTALLED += 1 ))
|
||||
else
|
||||
echo " ... skipping already installed: ${plugin[0]}:${plugin[1]}"
|
||||
fi
|
||||
done < "$JENKINS_INPUT_JOB_LIST"
|
||||
|
||||
echo "---------------------------------------------------"
|
||||
if (( "$COUNT_PLUGINS_INSTALLED" > 0 ))
|
||||
then
|
||||
echo "INFO: Successfully installed $COUNT_PLUGINS_INSTALLED plugins."
|
||||
|
||||
if [ -d $JENKINS_PLUGINS_DIR ]
|
||||
then
|
||||
echo "INFO: Please restart the container for changes to take effect!"
|
||||
fi
|
||||
else
|
||||
echo "INFO: No changes, all plugins previously installed."
|
||||
|
||||
fi
|
||||
echo "---------------------------------------------------"
|
||||
|
||||
#cleanup
|
||||
rm "$TEMP_ALREADY_INSTALLED"
|
||||
exit 0
|
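As a usage sketch only (the plugin names and image tag below are illustrative, not taken from this repo): plugins.txt lists one `plugin-id:version` per line, and the version defaults to `latest` when omitted.

```bash
# Hypothetical plugins list consumed by plugins.sh
cat > plugins.txt <<'EOF'
greenballs:1.15
ansicolor
EOF

# Run the (deprecated) helper inside an image built from this Jenkins setup;
# already-installed plugins are skipped, new ones are downloaded as .jpi files
# into /usr/share/jenkins/ref/plugins.
docker run --rm -v "$PWD/plugins.txt:/plugins.txt" my-jenkins-image \
  /usr/local/bin/plugins.sh /plugins.txt
```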
148 jenkins/publish.sh (new file)
@@ -0,0 +1,148 @@
#!/bin/bash -eu
|
||||
|
||||
# Publish any versions of the docker image not yet pushed to jenkinsci/jenkins
|
||||
# Arguments:
|
||||
# -n dry run, do not build or publish images
|
||||
|
||||
set -o pipefail
|
||||
|
||||
sort-versions() {
|
||||
if [ "$(uname)" == 'Darwin' ]; then
|
||||
gsort --version-sort
|
||||
else
|
||||
sort --version-sort
|
||||
fi
|
||||
}
|
||||
|
||||
# Try tagging with and without -f to support all versions of docker
|
||||
docker-tag() {
|
||||
local from="jenkinsci/jenkins:$1"
|
||||
local to="jenkinsci/jenkins:$2"
|
||||
local out
|
||||
if out=$(docker tag -f "$from" "$to" 2>&1); then
|
||||
echo "$out"
|
||||
else
|
||||
docker tag "$from" "$to"
|
||||
fi
|
||||
}
|
||||
|
||||
get-variant() {
|
||||
local branch
|
||||
branch=$(git show-ref | grep $(git rev-list -n 1 HEAD) | tail -1 | rev | cut -d/ -f 1 | rev)
|
||||
if [ -z "$branch" ]; then
|
||||
>&2 echo "Could not get the current branch name for commit, not in a branch?: $(git rev-list -n 1 HEAD)"
|
||||
return 1
|
||||
fi
|
||||
case "$branch" in
|
||||
master) echo "" ;;
|
||||
*) echo "-${branch}" ;;
|
||||
esac
|
||||
}
|
||||
|
||||
login-token() {
|
||||
# could use jq .token
|
||||
curl -q -sSL https://auth.docker.io/token\?service\=registry.docker.io\&scope\=repository:jenkinsci/jenkins:pull | grep -o '"token":"[^"]*"' | cut -d':' -f 2 | xargs echo
|
||||
}
|
||||
|
||||
is-published() {
|
||||
get-manifest "$1" &> /dev/null
|
||||
}
|
||||
|
||||
get-manifest() {
|
||||
local tag=$1
|
||||
curl -q -fsSL -H "Accept: application/vnd.docker.distribution.manifest.v2+json" -H "Authorization: Bearer $TOKEN" "https://index.docker.io/v2/jenkinsci/jenkins/manifests/$tag"
|
||||
}
|
||||
|
||||
get-digest() {
|
||||
#get-manifest "$1" | jq .config.digest
|
||||
get-manifest "$1" | grep -A 10 -o '"config".*' | grep digest | head -1 | cut -d':' -f 2,3 | xargs echo
|
||||
}
|
||||
|
||||
get-latest-versions() {
|
||||
curl -q -fsSL https://api.github.com/repos/jenkinsci/jenkins/tags?per_page=20 | grep '"name": "jenkins-' | egrep -o '[0-9]+(\.[0-9]+)+' | sort-versions | uniq
|
||||
}
|
||||
|
||||
publish() {
|
||||
local version=$1
|
||||
local variant=$2
|
||||
local tag="${version}${variant}"
|
||||
local sha
|
||||
local build_opts="--no-cache --pull"
|
||||
|
||||
sha=$(curl -q -fsSL "http://repo.jenkins-ci.org/simple/releases/org/jenkins-ci/main/jenkins-war/${version}/jenkins-war-${version}.war.sha1")
|
||||
|
||||
docker build --build-arg "JENKINS_VERSION=$version" \
|
||||
--build-arg "JENKINS_SHA=$sha" \
|
||||
--tag "jenkinsci/jenkins:${tag}" ${build_opts} .
|
||||
|
||||
docker push "jenkinsci/jenkins:${tag}"
|
||||
}
|
||||
|
||||
tag-and-push() {
|
||||
local source=$1
|
||||
local target=$2
|
||||
local digest_source; digest_source=$(get-digest "${source}")
local digest_target; digest_target=$(get-digest "${target}")
|
||||
if [ "$digest_source" == "$digest_target" ]; then
|
||||
echo "Images ${source} [$digest_source] and ${target} [$digest_target] are already the same, not updating tags"
|
||||
else
|
||||
echo "Creating tag ${target} pointing to ${source}"
|
||||
if [ ! "$dry_run" = true ]; then
|
||||
docker-tag "jenkinsci/jenkins:${source}" "jenkinsci/jenkins:${target}"
|
||||
docker push "jenkinsci/jenkins:${target}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
publish-latest() {
|
||||
local version=$1
|
||||
local variant=$2
|
||||
|
||||
# push latest (for master) or the name of the branch (for other branches)
|
||||
if [ -z "${variant}" ]; then
|
||||
tag-and-push "${version}${variant}" "latest"
|
||||
else
|
||||
tag-and-push "${version}${variant}" "${variant#-}"
|
||||
fi
|
||||
}
|
||||
|
||||
publish-lts() {
|
||||
local version=$1
|
||||
local variant=$2
|
||||
tag-and-push "${version}" "lts${variant}"
|
||||
}
|
||||
|
||||
dry_run=false
|
||||
if [ "-n" == "${1:-}" ]; then
|
||||
dry_run=true
|
||||
fi
|
||||
if [ "$dry_run" = true ]; then
|
||||
echo "Dry run, will not build or publish images"
|
||||
fi
|
||||
|
||||
TOKEN=$(login-token)
|
||||
|
||||
variant=$(get-variant)
|
||||
|
||||
lts_version=""
|
||||
version=""
|
||||
for version in $(get-latest-versions); do
|
||||
if is-published "$version$variant"; then
|
||||
echo "Tag is already published: $version$variant"
|
||||
else
|
||||
echo "Publishing version: $version$variant"
|
||||
if [ ! "$dry_run" = true ]; then
|
||||
publish "$version" "$variant"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Update lts tag
|
||||
if [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
lts_version="${version}"
|
||||
fi
|
||||
done
|
||||
|
||||
publish-latest "${version}" "${variant}"
|
||||
if [ -n "${lts_version}" ]; then
|
||||
publish-lts "${lts_version}" "${variant}"
|
||||
fi
|
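Typical invocation, as suggested by the argument handling above:

```bash
# Dry run: resolve the latest Jenkins versions and report which
# jenkinsci/jenkins tags would be built and pushed, without building anything.
./publish.sh -n

# Real run: builds the missing versions and pushes the tags
# (assumes `docker login` for the jenkinsci organisation was already done).
./publish.sh
```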
31 jenkins/tests/functions.bats (new file)
@@ -0,0 +1,31 @@
#!/usr/bin/env bats

SUT_IMAGE=bats-jenkins

load 'test_helper/bats-support/load'
load 'test_helper/bats-assert/load'
load test_helpers

. $BATS_TEST_DIRNAME/../jenkins-support

@test "build image" {
  cd $BATS_TEST_DIRNAME/..
  docker_build -t $SUT_IMAGE .
}

@test "versionLT" {
  run docker run --rm $SUT_IMAGE bash -c "source /usr/local/bin/jenkins-support && versionLT 1.0 1.0"
  assert_failure
  run docker run --rm $SUT_IMAGE bash -c "source /usr/local/bin/jenkins-support && versionLT 1.0 1.1"
  assert_success
  run docker run --rm $SUT_IMAGE bash -c "source /usr/local/bin/jenkins-support && versionLT 1.1 1.0"
  assert_failure
  run docker run --rm $SUT_IMAGE bash -c "source /usr/local/bin/jenkins-support && versionLT 1.0-beta-1 1.0"
  assert_success
  run docker run --rm $SUT_IMAGE bash -c "source /usr/local/bin/jenkins-support && versionLT 1.0 1.0-beta-1"
  assert_failure
  run docker run --rm $SUT_IMAGE bash -c "source /usr/local/bin/jenkins-support && versionLT 1.0-alpha-1 1.0-beta-1"
  assert_success
  run docker run --rm $SUT_IMAGE bash -c "source /usr/local/bin/jenkins-support && versionLT 1.0-beta-1 1.0-alpha-1"
  assert_failure
}
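A minimal sketch of how these suites are meant to be run, assuming bats plus the bats-support and bats-assert helpers are checked out under tests/test_helper as the `load` lines above expect:

```bash
cd jenkins/tests
bats functions.bats          # jenkins-support helper functions
bats install-plugins.bats    # plugin installation scenarios (listed below)
bats runtime.bats            # container runtime behaviour (listed below)
```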
118 jenkins/tests/install-plugins.bats (new file)
@@ -0,0 +1,118 @@
#!/usr/bin/env bats
|
||||
|
||||
SUT_IMAGE=bats-jenkins
|
||||
|
||||
load 'test_helper/bats-support/load'
|
||||
load 'test_helper/bats-assert/load'
|
||||
load test_helpers
|
||||
|
||||
@test "build image" {
|
||||
cd $BATS_TEST_DIRNAME/..
|
||||
docker_build -t $SUT_IMAGE .
|
||||
}
|
||||
|
||||
@test "plugins are installed with plugins.sh" {
|
||||
run docker build -t $SUT_IMAGE-plugins $BATS_TEST_DIRNAME/plugins
|
||||
assert_success
|
||||
# replace DOS line endings \r\n
|
||||
run bash -c "docker run --rm $SUT_IMAGE-plugins ls --color=never -1 /var/jenkins_home/plugins | tr -d '\r'"
|
||||
assert_success
|
||||
assert_line 'maven-plugin.jpi'
|
||||
assert_line 'maven-plugin.jpi.pinned'
|
||||
assert_line 'ant.jpi'
|
||||
assert_line 'ant.jpi.pinned'
|
||||
}
|
||||
|
||||
@test "plugins are installed with install-plugins.sh" {
|
||||
run docker build -t $SUT_IMAGE-install-plugins $BATS_TEST_DIRNAME/install-plugins
|
||||
assert_success
|
||||
refute_line --partial 'Skipping already bundled dependency'
|
||||
# replace DOS line endings \r\n
|
||||
run bash -c "docker run --rm $SUT_IMAGE-install-plugins ls --color=never -1 /var/jenkins_home/plugins | tr -d '\r'"
|
||||
assert_success
|
||||
assert_line 'maven-plugin.jpi'
|
||||
assert_line 'maven-plugin.jpi.pinned'
|
||||
assert_line 'ant.jpi'
|
||||
assert_line 'ant.jpi.pinned'
|
||||
assert_line 'credentials.jpi'
|
||||
assert_line 'credentials.jpi.pinned'
|
||||
assert_line 'mesos.jpi'
|
||||
assert_line 'mesos.jpi.pinned'
|
||||
# optional dependencies
|
||||
refute_line 'metrics.jpi'
|
||||
refute_line 'metrics.jpi.pinned'
|
||||
# plugins bundled but under detached-plugins, so need to be installed
|
||||
assert_line 'javadoc.jpi'
|
||||
assert_line 'javadoc.jpi.pinned'
|
||||
assert_line 'mailer.jpi'
|
||||
assert_line 'mailer.jpi.pinned'
|
||||
}
|
||||
|
||||
@test "plugins are installed with install-plugins.sh even when already exist" {
|
||||
run docker build -t $SUT_IMAGE-install-plugins-update --no-cache $BATS_TEST_DIRNAME/install-plugins/update
|
||||
assert_success
|
||||
assert_line "Using provided plugin: ant"
|
||||
refute_line --partial 'Skipping already bundled dependency'
|
||||
# replace DOS line endings \r\n
|
||||
run bash -c "docker run --rm $SUT_IMAGE-install-plugins-update unzip -p /var/jenkins_home/plugins/maven-plugin.jpi META-INF/MANIFEST.MF | tr -d '\r'"
|
||||
assert_success
|
||||
assert_line 'Plugin-Version: 2.13'
|
||||
}
|
||||
|
||||
@test "plugins are getting upgraded but not downgraded" {
|
||||
# Initial execution
|
||||
run docker build -t $SUT_IMAGE-install-plugins $BATS_TEST_DIRNAME/install-plugins
|
||||
assert_success
|
||||
local work; work="$BATS_TEST_DIRNAME/upgrade-plugins/work"
|
||||
mkdir -p $work
|
||||
# Image contains maven-plugin 2.7.1 and ant-plugin 1.3
|
||||
run bash -c "docker run -u $UID -v $work:/var/jenkins_home --rm $SUT_IMAGE-install-plugins true"
|
||||
assert_success
|
||||
run unzip_manifest maven-plugin.jpi $work
|
||||
assert_line 'Plugin-Version: 2.7.1'
|
||||
run unzip_manifest ant.jpi $work
|
||||
assert_line 'Plugin-Version: 1.3'
|
||||
|
||||
# Upgrade to new image with different plugins
|
||||
run docker build -t $SUT_IMAGE-upgrade-plugins $BATS_TEST_DIRNAME/upgrade-plugins
|
||||
assert_success
|
||||
# Images contains maven-plugin 2.13 and ant-plugin 1.2
|
||||
run bash -c "docker run -u $UID -v $work:/var/jenkins_home --rm $SUT_IMAGE-upgrade-plugins true"
|
||||
assert_success
|
||||
run unzip_manifest maven-plugin.jpi $work
|
||||
assert_success
|
||||
# Should be updated
|
||||
assert_line 'Plugin-Version: 2.13'
|
||||
run unzip_manifest ant.jpi $work
|
||||
# 1.2 is older than the existing 1.3, so keep 1.3
|
||||
assert_line 'Plugin-Version: 1.3'
|
||||
}
|
||||
|
||||
@test "clean work directory" {
|
||||
run bash -c "rm -rf $BATS_TEST_DIRNAME/upgrade-plugins/work"
|
||||
}
|
||||
|
||||
@test "do not upgrade if plugin has been manually updated" {
|
||||
run docker build -t $SUT_IMAGE-install-plugins $BATS_TEST_DIRNAME/install-plugins
|
||||
assert_success
|
||||
local work; work="$BATS_TEST_DIRNAME/upgrade-plugins/work"
|
||||
mkdir -p $work
|
||||
# Image contains maven-plugin 2.7.1 and ant-plugin 1.3
|
||||
run bash -c "docker run -u $UID -v $work:/var/jenkins_home --rm $SUT_IMAGE-install-plugins curl --connect-timeout 20 --retry 5 --retry-delay 0 --retry-max-time 60 -s -f -L https://updates.jenkins.io/download/plugins/maven-plugin/2.12.1/maven-plugin.hpi -o /var/jenkins_home/plugins/maven-plugin.jpi"
|
||||
assert_success
|
||||
run unzip_manifest maven-plugin.jpi $work
|
||||
assert_line 'Plugin-Version: 2.12.1'
|
||||
run docker build -t $SUT_IMAGE-upgrade-plugins $BATS_TEST_DIRNAME/upgrade-plugins
|
||||
assert_success
|
||||
# Images contains maven-plugin 2.13 and ant-plugin 1.2
|
||||
run bash -c "docker run -u $UID -v $work:/var/jenkins_home --rm $SUT_IMAGE-upgrade-plugins true"
|
||||
assert_success
|
||||
run unzip_manifest maven-plugin.jpi $work
|
||||
assert_success
|
||||
# Shouldn't be updated
|
||||
refute_line 'Plugin-Version: 2.13'
|
||||
}
|
||||
|
||||
@test "clean work directory" {
|
||||
run bash -c "rm -rf $BATS_TEST_DIRNAME/upgrade-plugins/work"
|
||||
}
|
3 jenkins/tests/install-plugins/Dockerfile (new file)
@@ -0,0 +1,3 @@
FROM bats-jenkins

RUN /usr/local/bin/install-plugins.sh maven-plugin:2.7.1 ant:1.3 mesos:0.13.0

3 jenkins/tests/install-plugins/update/Dockerfile (new file)
@@ -0,0 +1,3 @@
FROM bats-jenkins-install-plugins

RUN /usr/local/bin/install-plugins.sh maven-plugin:2.13 ant:1.3

4 jenkins/tests/plugins/Dockerfile (new file)
@@ -0,0 +1,4 @@
FROM bats-jenkins

COPY plugins.txt /usr/share/jenkins/ref/
RUN /usr/local/bin/plugins.sh /usr/share/jenkins/ref/plugins.txt

2 jenkins/tests/plugins/plugins.txt (new file)
@@ -0,0 +1,2 @@
maven-plugin:2.7.1
ant:1.3
56 jenkins/tests/runtime.bats (new file)
@@ -0,0 +1,56 @@
#!/usr/bin/env bats
|
||||
|
||||
SUT_IMAGE=bats-jenkins
|
||||
SUT_CONTAINER=bats-jenkins
|
||||
|
||||
load 'test_helper/bats-support/load'
|
||||
load 'test_helper/bats-assert/load'
|
||||
load test_helpers
|
||||
|
||||
@test "build image" {
|
||||
cd $BATS_TEST_DIRNAME/..
|
||||
docker_build -t $SUT_IMAGE .
|
||||
}
|
||||
|
||||
@test "clean test containers" {
|
||||
cleanup $SUT_CONTAINER
|
||||
}
|
||||
|
||||
@test "test multiple JENKINS_OPTS" {
|
||||
# running --help --version should return the version, not the help
|
||||
local version=$(grep 'ENV JENKINS_VERSION' Dockerfile | sed -e 's/.*:-\(.*\)}/\1/')
|
||||
# need the last line of output
|
||||
assert "${version}" docker run --rm -e JENKINS_OPTS="--help --version" --name $SUT_CONTAINER -P $SUT_IMAGE | tail -n 1
|
||||
}
|
||||
|
||||
@test "test jenkins arguments" {
|
||||
# running --help --version should return the version, not the help
|
||||
local version=$(grep 'ENV JENKINS_VERSION' Dockerfile | sed -e 's/.*:-\(.*\)}/\1/')
|
||||
# need the last line of output
|
||||
assert "${version}" docker run --rm --name $SUT_CONTAINER -P $SUT_IMAGE --help --version | tail -n 1
|
||||
}
|
||||
|
||||
@test "create test container" {
|
||||
docker run -d -e JAVA_OPTS="-Duser.timezone=Europe/Madrid -Dhudson.model.DirectoryBrowserSupport.CSP=\"default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval'; style-src 'self' 'unsafe-inline';\"" --name $SUT_CONTAINER -P $SUT_IMAGE
|
||||
}
|
||||
|
||||
@test "test container is running" {
|
||||
sleep 1 # give time to eventually fail to initialize
|
||||
retry 3 1 assert "true" docker inspect -f {{.State.Running}} $SUT_CONTAINER
|
||||
}
|
||||
|
||||
@test "Jenkins is initialized" {
|
||||
retry 30 5 test_url /api/json
|
||||
}
|
||||
|
||||
@test "JAVA_OPTS are set" {
|
||||
local sed_expr='s/<wbr>//g;s/<td class="pane">.*<\/td><td class.*normal">//g;s/<t.>//g;s/<\/t.>//g'
|
||||
assert 'default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval'; style-src 'self' 'unsafe-inline';' \
|
||||
bash -c "curl -fsSL --user \"admin:$(get_jenkins_password)\" $(get_jenkins_url)/systemInfo | sed 's/<\/tr>/<\/tr>\'$'\n/g' | grep '<td class=\"pane\">hudson.model.DirectoryBrowserSupport.CSP</td>' | sed -e '${sed_expr}'"
|
||||
assert 'Europe/Madrid' \
|
||||
bash -c "curl -fsSL --user \"admin:$(get_jenkins_password)\" $(get_jenkins_url)/systemInfo | sed 's/<\/tr>/<\/tr>\'$'\n/g' | grep '<td class=\"pane\">user.timezone</td>' | sed -e '${sed_expr}'"
|
||||
}
|
||||
|
||||
@test "clean test containers" {
|
||||
cleanup $SUT_CONTAINER
|
||||
}
|
84 jenkins/tests/test_helpers.bash (new file)
@@ -0,0 +1,84 @@
#!/bin/bash
|
||||
|
||||
# check dependencies
|
||||
(
|
||||
type docker &>/dev/null || ( echo "docker is not available"; exit 1 )
|
||||
type curl &>/dev/null || ( echo "curl is not available"; exit 1 )
|
||||
)>&2
|
||||
|
||||
# Assert that $1 is the output of the command given by the remaining arguments
|
||||
function assert {
|
||||
local expected_output=$1
|
||||
shift
|
||||
local actual_output
|
||||
actual_output=$("$@")
|
||||
actual_output="${actual_output//[$'\t\r\n']}" # remove newlines
|
||||
if ! [ "$actual_output" = "$expected_output" ]; then
|
||||
echo "expected: \"$expected_output\""
|
||||
echo "actual: \"$actual_output\""
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
# Retry a command $1 times until it succeeds. Wait $2 seconds between retries.
|
||||
function retry {
|
||||
local attempts=$1
|
||||
shift
|
||||
local delay=$1
|
||||
shift
|
||||
local i
|
||||
|
||||
for ((i=0; i < attempts; i++)); do
|
||||
run "$@"
|
||||
if [ "$status" -eq 0 ]; then
|
||||
return 0
|
||||
fi
|
||||
sleep $delay
|
||||
done
|
||||
|
||||
echo "Command \"$*\" failed $attempts times. Status: $status. Output: $output" >&2
|
||||
false
|
||||
}
|
||||
|
||||
function docker_build {
|
||||
if [ -n "$JENKINS_VERSION" ]; then
|
||||
docker build --build-arg JENKINS_VERSION=$JENKINS_VERSION --build-arg JENKINS_SHA=$JENKINS_SHA "$@"
|
||||
else
|
||||
docker build "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
function get_jenkins_url {
|
||||
if [ -z "${DOCKER_HOST}" ]; then
|
||||
DOCKER_IP=localhost
|
||||
else
|
||||
DOCKER_IP=$(echo "$DOCKER_HOST" | sed -e 's|tcp://\(.*\):[0-9]*|\1|')
|
||||
fi
|
||||
echo "http://$DOCKER_IP:$(docker port "$SUT_CONTAINER" 8080 | cut -d: -f2)"
|
||||
}
|
||||
|
||||
function get_jenkins_password {
|
||||
docker logs "$SUT_CONTAINER" 2>&1 | grep -A 2 "Please use the following password to proceed to installation" | tail -n 1
|
||||
}
|
||||
|
||||
function test_url {
|
||||
run curl --user "admin:$(get_jenkins_password)" --output /dev/null --silent --head --fail --connect-timeout 30 --max-time 60 "$(get_jenkins_url)$1"
|
||||
if [ "$status" -eq 0 ]; then
|
||||
true
|
||||
else
|
||||
echo "URL $(get_jenkins_url)$1 failed" >&2
|
||||
echo "output: $output" >&2
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
function cleanup {
|
||||
docker kill "$1" &>/dev/null ||:
|
||||
docker rm -fv "$1" &>/dev/null ||:
|
||||
}
|
||||
|
||||
function unzip_manifest {
|
||||
local plugin=$1
|
||||
local work=$2
|
||||
bash -c "docker run --rm -v $work:/var/jenkins_home --entrypoint unzip $SUT_IMAGE -p /var/jenkins_home/plugins/$plugin META-INF/MANIFEST.MF | tr -d '\r'"
|
||||
}
|
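To show how the helpers above compose, here is a small illustrative test; the URL path and the pre-set $SUT_CONTAINER are assumptions, not part of the actual suite:

```bash
#!/usr/bin/env bats
load test_helpers

@test "jenkins eventually answers on /login" {
  # retry <attempts> <delay-seconds> <command...>
  retry 30 5 test_url /login
  # assert <expected-output> <command...>
  assert "true" docker inspect -f '{{.State.Running}}' "$SUT_CONTAINER"
}
```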
3 jenkins/tests/upgrade-plugins/Dockerfile (new file)
@@ -0,0 +1,3 @@
FROM bats-jenkins

RUN /usr/local/bin/install-plugins.sh maven-plugin:2.13 ant:1.2
36 jenkins/update-official-library.sh (new file)
@@ -0,0 +1,36 @@
#!/bin/bash -eu

# Generate the Docker official-images file

sha() {
    local branch=$1
    git rev-parse $branch
}

version_from_dockerfile() {
    local branch=$1
    git show $branch:Dockerfile | grep JENKINS_VERSION: | sed -e 's/.*:-\(.*\)}/\1/'
}

master_sha=$(sha master)
alpine_sha=$(sha alpine)

master_version=$(version_from_dockerfile master)
alpine_version=$(version_from_dockerfile alpine)

if ! [ "$master_version" == "$alpine_version" ]; then
    echo "Master version '$master_version' does not match alpine version '$alpine_version'"
    exit 1
fi

cat << EOF > ../official-images/library/jenkins
# maintainer: Nicolas De Loof <nicolas.deloof@gmail.com> (@ndeloof)
# maintainer: Michael Neale <mneale@cloudbees.com> (@michaelneale)
# maintainer: Carlos Sanchez <csanchez@cloudbees.com> (@carlossg)

latest: git://github.com/jenkinsci/jenkins-ci.org-docker@$master_sha
$master_version: git://github.com/jenkinsci/jenkins-ci.org-docker@$master_sha

alpine: git://github.com/jenkinsci/jenkins-ci.org-docker@$alpine_sha
$alpine_version-alpine: git://github.com/jenkinsci/jenkins-ci.org-docker@$alpine_sha
EOF
0 logs/apache2/.gitkeep (new file)
0 logs/nginx/.gitkeep (new file)
@@ -1,6 +1,6 @@
FROM mailhog/mailhog

Maintainer Mahmoud Zalt <mahmoud@zalt.me>
MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>

CMD ["Mailhog"]
23 mssql/Dockerfile (new file)
@@ -0,0 +1,23 @@
FROM microsoft/mssql-server-linux

MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>

# Create config directory
# and set it as WORKDIR
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app

# Bundle app source
COPY . /usr/src/app

RUN chmod +x /usr/src/app/create_table.sh

ENV MSSQL_DATABASE=$MSSQL_DATABASE
ENV ACCEPT_EULA=Y
ENV SA_PASSWORD=$MSSQL_PASSWORD

VOLUME /var/opt/mssql

EXPOSE 1433

CMD /bin/bash ./entrypoint.sh
5 mssql/create_table.sh (new file)
@@ -0,0 +1,5 @@
# wait for the SQL Server to come up
sleep 45s

# run the setup script to create the DB and the schema in the DB
/opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P $SA_PASSWORD -d master -i setup.sql
2 mssql/entrypoint.sh (new file)
@@ -0,0 +1,2 @@
# start SQL Server, start the script to create the DB and import the data, start the app
/opt/mssql/bin/sqlservr.sh & /usr/src/app/create_table.sh & tail -f /dev/null
4 mssql/setup.sql (new file)
@@ -0,0 +1,4 @@
CREATE DATABASE $(MSSQL_DATABASE);
GO
USE $(MSSQL_DATABASE);
GO
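Once the container is up (and the 45-second startup wait in create_table.sh has passed), the sqlcmd client bundled in the image can be used to confirm the database was created. The service name below follows the usual laradock docker-compose convention but is an assumption here:

```bash
# SA_PASSWORD is an ENV baked into the image, so evaluate it inside the container.
docker-compose exec mssql bash -c \
  '/opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P "$SA_PASSWORD" -Q "SELECT name FROM sys.databases"'
```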
@@ -2,26 +2,10 @@ FROM mysql:5.7

MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>

ADD startup /etc/mysql/startup

RUN chown -R mysql:root /var/lib/mysql/

ARG MYSQL_DATABASE=default
ARG MYSQL_USER=default
ARG MYSQL_PASSWORD=secret
ARG MYSQL_ROOT_PASSWORD=root

ENV MYSQL_DATABASE=$MYSQL_DATABASE
ENV MYSQL_USER=$MYSQL_USER
ENV MYSQL_PASSWORD=$MYSQL_PASSWORD
ENV MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD

RUN sed -i 's/MYSQL_DATABASE/'$MYSQL_DATABASE'/g' /etc/mysql/startup && \
    sed -i 's/MYSQL_USER/'$MYSQL_USER'/g' /etc/mysql/startup && \
    sed -i 's/MYSQL_PASSWORD/'$MYSQL_PASSWORD'/g' /etc/mysql/startup

ADD my.cnf /etc/mysql/conf.d/my.cnf

CMD ["mysqld", "--init-file=/etc/mysql/startup"]
CMD ["mysqld"]

EXPOSE 3306
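With the startup init-file dropped and the plain `CMD ["mysqld"]`, configuration appears to move to the standard mysql:5.7 runtime environment variables. As a sketch only, the values below mirror the previous build-arg defaults; the image name and volume path are placeholders:

```bash
docker run -d --name mysql \
  -e MYSQL_DATABASE=default \
  -e MYSQL_USER=default \
  -e MYSQL_PASSWORD=secret \
  -e MYSQL_ROOT_PASSWORD=root \
  -v "$PWD/data/mysql:/var/lib/mysql" \
  laradock_mysql
```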
@@ -1,4 +0,0 @@
DROP USER IF EXISTS 'MYSQL_USER';
CREATE USER 'MYSQL_USER'@'%' IDENTIFIED BY 'MYSQL_PASSWORD';
CREATE DATABASE IF NOT EXISTS `MYSQL_DATABASE`;
GRANT ALL ON `MYSQL_DATABASE`.* TO 'MYSQL_USER'@'%';
@@ -19,8 +19,8 @@ http {
  client_max_body_size 20M;
  include /etc/nginx/mime.types;
  default_type application/octet-stream;
  access_log /var/log/nginx/access.log;
  error_log /var/log/nginx/error.log;
  access_log /dev/stdout;
  error_log /dev/stderr;
  gzip on;
  gzip_disable "msie6";
@ -10,7 +10,7 @@
|
||||
# https://hub.docker.com/r/laradock/php-fpm/tags/
|
||||
#
|
||||
|
||||
FROM laradock/php-fpm:5.6--1.2
|
||||
FROM laradock/php-fpm:5.6--1.4
|
||||
|
||||
MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>
|
||||
|
||||
@ -65,6 +65,18 @@ RUN if [ ${INSTALL_XDEBUG} = true ]; then \
|
||||
# Copy xdebug configuration for remote debugging
|
||||
COPY ./xdebug.ini /usr/local/etc/php/conf.d/xdebug.ini
|
||||
|
||||
#####################################
|
||||
# PHP REDIS EXTENSION FOR PHP 5
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_PHPREDIS=false
|
||||
RUN if [ ${INSTALL_PHPREDIS} = true ]; then \
|
||||
# Install Php Redis Extension
|
||||
pecl install -o -f redis \
|
||||
&& rm -rf /tmp/pear \
|
||||
&& docker-php-ext-enable redis \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
# MongoDB:
|
||||
#####################################
|
||||
@ -83,8 +95,7 @@ RUN if [ ${INSTALL_MONGO} = true ]; then \
|
||||
ARG INSTALL_ZIP_ARCHIVE=false
|
||||
RUN if [ ${INSTALL_ZIP_ARCHIVE} = true ]; then \
|
||||
# Install the zip extension
|
||||
pecl install zip && \
|
||||
docker-php-ext-enable zip \
|
||||
docker-php-ext-install zip \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
@ -108,6 +119,16 @@ RUN if [ ${INSTALL_MEMCACHED} = true ]; then \
|
||||
docker-php-ext-enable memcached \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
# Exif:
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_EXIF=false
|
||||
RUN if [ ${INSTALL_EXIF} = true ]; then \
|
||||
# Enable Exif PHP extensions requirements
|
||||
docker-php-ext-install exif \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
# PHP Aerospike:
|
||||
#####################################
|
||||
@ -137,8 +158,7 @@ RUN if [ ${INSTALL_AEROSPIKE_EXTENSION} = true ]; then \
|
||||
|
||||
ARG INSTALL_OPCACHE=false
|
||||
RUN if [ ${INSTALL_OPCACHE} = true ]; then \
|
||||
docker-php-ext-install opcache && \
|
||||
docker-php-ext-enable opcache \
|
||||
docker-php-ext-install opcache \
|
||||
;fi
|
||||
|
||||
# Copy opcache configuration
|
||||
@ -149,7 +169,10 @@ COPY ./opcache.ini /usr/local/etc/php/conf.d/opcache.ini
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_MYSQLI=false
|
||||
# The MySQL extension is available for PHP 5.6 and lower only
|
||||
COPY ./mysql.ini /usr/local/etc/php/conf.d/mysql.ini
|
||||
RUN if [ ${INSTALL_MYSQLI} = true ]; then \
|
||||
docker-php-ext-install mysql && \
|
||||
docker-php-ext-install mysqli \
|
||||
;fi
|
||||
|
||||
@ -162,6 +185,29 @@ RUN if [ ${INSTALL_TOKENIZER} = true ]; then \
|
||||
docker-php-ext-install tokenizer \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
# Human Language and Character Encoding Support:
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_INTL=false
|
||||
RUN if [ ${INSTALL_INTL} = true ]; then \
|
||||
# Install intl and requirements
|
||||
apt-get install -y zlib1g-dev libicu-dev g++ && \
|
||||
docker-php-ext-configure intl && \
|
||||
docker-php-ext-install intl \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
# GHOSTSCRIPT:
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_GHOSTSCRIPT=false
|
||||
RUN if [ ${INSTALL_GHOSTSCRIPT} = true ]; then \
|
||||
# Install the ghostscript extension for PDF editing
|
||||
apt-get update && \
|
||||
apt-get install -y poppler-utils ghostscript \
|
||||
;fi
|
||||
|
||||
#
|
||||
#--------------------------------------------------------------------------
|
||||
# Final Touch
|
||||
@ -171,7 +217,7 @@ RUN if [ ${INSTALL_TOKENIZER} = true ]; then \
|
||||
ADD ./laravel.ini /usr/local/etc/php/conf.d
|
||||
ADD ./laravel.pool.conf /usr/local/etc/php-fpm.d/
|
||||
|
||||
RUN rm -r /var/lib/apt/lists/*
|
||||
#RUN rm -r /var/lib/apt/lists/*
|
||||
|
||||
RUN usermod -u 1000 www-data
|
||||
|
||||
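The INSTALL_* toggles above are plain build args that laradock normally sets from docker-compose.yml. As a sketch only (the Dockerfile name and image tag below are assumptions), an image with a few extras enabled can also be built directly:

```bash
docker build ./php-fpm -f ./php-fpm/Dockerfile-56 \
  --build-arg INSTALL_XDEBUG=true \
  --build-arg INSTALL_PHPREDIS=true \
  --build-arg INSTALL_OPCACHE=true \
  -t laradock_php-fpm:5.6
```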
|
@ -10,7 +10,7 @@
|
||||
# https://hub.docker.com/r/laradock/php-fpm/tags/
|
||||
#
|
||||
|
||||
FROM laradock/php-fpm:7.0--1.2
|
||||
FROM laradock/php-fpm:7.0--1.4
|
||||
|
||||
MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>
|
||||
|
||||
@ -34,7 +34,6 @@ MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>
|
||||
# in the `docker-compose.yml` before the build.
|
||||
# Example:
|
||||
# - INSTALL_ZIP_ARCHIVE=true
|
||||
# - ...
|
||||
#
|
||||
|
||||
#####################################
|
||||
@ -60,9 +59,13 @@ RUN if [ ${INSTALL_XDEBUG} = true ]; then \
|
||||
docker-php-ext-enable xdebug \
|
||||
;fi
|
||||
|
||||
# Copy xdebug configuration for remote debugging
|
||||
COPY ./xdebug.ini /usr/local/etc/php/conf.d/xdebug.ini
|
||||
|
||||
#####################################
|
||||
# PHP REDIS EXTENSION FOR PHP 7.0
|
||||
# PHP REDIS EXTENSION FOR PHP 7
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_PHPREDIS=false
|
||||
RUN if [ ${INSTALL_PHPREDIS} = true ]; then \
|
||||
# Install Php Redis Extension
|
||||
@ -71,9 +74,6 @@ RUN if [ ${INSTALL_PHPREDIS} = true ]; then \
|
||||
&& docker-php-ext-enable redis \
|
||||
;fi
|
||||
|
||||
# Copy xdebug configuration for remote debugging
|
||||
COPY ./xdebug.ini /usr/local/etc/php/conf.d/xdebug.ini
|
||||
|
||||
#####################################
|
||||
# MongoDB:
|
||||
#####################################
|
||||
@ -92,8 +92,7 @@ RUN if [ ${INSTALL_MONGO} = true ]; then \
|
||||
ARG INSTALL_ZIP_ARCHIVE=false
|
||||
RUN if [ ${INSTALL_ZIP_ARCHIVE} = true ]; then \
|
||||
# Install the zip extension
|
||||
pecl install zip && \
|
||||
docker-php-ext-enable zip \
|
||||
docker-php-ext-install zip \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
@ -135,11 +134,9 @@ RUN if [ ${INSTALL_MEMCACHED} = true ]; then \
|
||||
ARG INSTALL_EXIF=false
|
||||
RUN if [ ${INSTALL_EXIF} = true ]; then \
|
||||
# Enable Exif PHP extensions requirements
|
||||
docker-php-ext-install exif && \
|
||||
docker-php-ext-enable exif \
|
||||
docker-php-ext-install exif \
|
||||
;fi
|
||||
|
||||
|
||||
#####################################
|
||||
# PHP Aerospike:
|
||||
#####################################
|
||||
@ -166,10 +163,10 @@ RUN if [ ${INSTALL_AEROSPIKE_EXTENSION} = true ]; then \
|
||||
#####################################
|
||||
# Opcache:
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_OPCACHE=false
|
||||
RUN if [ ${INSTALL_OPCACHE} = true ]; then \
|
||||
docker-php-ext-install opcache && \
|
||||
docker-php-ext-enable opcache \
|
||||
docker-php-ext-install opcache \
|
||||
;fi
|
||||
|
||||
# Copy opcache configuration
|
||||
@ -178,6 +175,7 @@ COPY ./opcache.ini /usr/local/etc/php/conf.d/opcache.ini
|
||||
#####################################
|
||||
# Mysqli Modifications:
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_MYSQLI=false
|
||||
RUN if [ ${INSTALL_MYSQLI} = true ]; then \
|
||||
docker-php-ext-install mysqli \
|
||||
@ -195,6 +193,7 @@ RUN if [ ${INSTALL_TOKENIZER} = true ]; then \
|
||||
#####################################
|
||||
# SQL SERVER:
|
||||
#####################################
|
||||
|
||||
ARG MSSQL=false
|
||||
RUN if [ ${MSSQL} = true ]; then \
|
||||
|
||||
@ -243,8 +242,16 @@ RUN if [ ${MSSQL} = true ]; then \
|
||||
&& ldd lib64/libmsodbcsql-13.0.so.0.0 \
|
||||
&& ./install.sh install --accept-license \
|
||||
&& ls -l /opt/microsoft/msodbcsql/ \
|
||||
&& odbcinst -q -d -n "ODBC Driver 13 for SQL Server" && \
|
||||
&& odbcinst -q -d -n "ODBC Driver 13 for SQL Server" \
|
||||
|
||||
#####################################
|
||||
# Install pdo_dblib
|
||||
#####################################
|
||||
|
||||
&& docker-php-ext-install pdo \
|
||||
&& docker-php-ext-configure pdo_dblib --with-libdir=/lib/x86_64-linux-gnu \
|
||||
&& docker-php-ext-install pdo_dblib \
|
||||
&& docker-php-ext-enable pdo_dblib && \
|
||||
|
||||
#####################################
|
||||
# Install sqlsrv and pdo_sqlsrv
|
||||
@ -263,8 +270,6 @@ RUN if [ ${MSSQL} = true ]; then \
|
||||
&& locale-gen \
|
||||
;fi
|
||||
|
||||
|
||||
|
||||
#####################################
|
||||
# Human Language and Character Encoding Support:
|
||||
#####################################
|
||||
@ -281,8 +286,8 @@ RUN if [ ${INSTALL_INTL} = true ]; then \
|
||||
# GHOSTSCRIPT:
|
||||
#####################################
|
||||
|
||||
ARG GHOSTSCRIPT=false
|
||||
RUN if [ ${GHOSTSCRIPT} = true ]; then \
|
||||
ARG INSTALL_GHOSTSCRIPT=false
|
||||
RUN if [ ${INSTALL_GHOSTSCRIPT} = true ]; then \
|
||||
# Install the ghostscript extension
|
||||
# for PDF editing
|
||||
apt-get -y update \
|
||||
@ -291,7 +296,6 @@ RUN if [ ${GHOSTSCRIPT} = true ]; then \
|
||||
ghostscript \
|
||||
;fi
|
||||
|
||||
|
||||
#
|
||||
#--------------------------------------------------------------------------
|
||||
# Final Touch
|
||||
@ -301,7 +305,7 @@ RUN if [ ${GHOSTSCRIPT} = true ]; then \
|
||||
ADD ./laravel.ini /usr/local/etc/php/conf.d
|
||||
ADD ./laravel.pool.conf /usr/local/etc/php-fpm.d/
|
||||
|
||||
RUN rm -r /var/lib/apt/lists/*
|
||||
#RUN rm -r /var/lib/apt/lists/*
|
||||
|
||||
RUN usermod -u 1000 www-data
|
||||
|
||||
|
@ -10,7 +10,7 @@
|
||||
# https://hub.docker.com/r/laradock/php-fpm/tags/
|
||||
#
|
||||
|
||||
FROM laradock/php-fpm:7.1--1.3
|
||||
FROM laradock/php-fpm:7.1--1.4
|
||||
|
||||
MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>
|
||||
|
||||
@ -34,9 +34,20 @@ MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>
|
||||
# in the `docker-compose.yml` before the build.
|
||||
# Example:
|
||||
# - INSTALL_ZIP_ARCHIVE=true
|
||||
# - ...
|
||||
#
|
||||
|
||||
#####################################
|
||||
# SOAP:
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_SOAP=false
|
||||
RUN if [ ${INSTALL_SOAP} = true ]; then \
|
||||
# Install the soap extension
|
||||
apt-get -y update && \
|
||||
apt-get -y install libxml2-dev php-soap && \
|
||||
docker-php-ext-install soap \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
# xDebug:
|
||||
#####################################
|
||||
@ -51,6 +62,18 @@ RUN if [ ${INSTALL_XDEBUG} = true ]; then \
|
||||
# Copy xdebug configuration for remote debugging
|
||||
COPY ./xdebug.ini /usr/local/etc/php/conf.d/xdebug.ini
|
||||
|
||||
#####################################
|
||||
# PHP REDIS EXTENSION FOR PHP 7
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_PHPREDIS=false
|
||||
RUN if [ ${INSTALL_PHPREDIS} = true ]; then \
|
||||
# Install Php Redis Extension
|
||||
pecl install -o -f redis \
|
||||
&& rm -rf /tmp/pear \
|
||||
&& docker-php-ext-enable redis \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
# MongoDB:
|
||||
#####################################
|
||||
@ -69,8 +92,17 @@ RUN if [ ${INSTALL_MONGO} = true ]; then \
|
||||
ARG INSTALL_ZIP_ARCHIVE=false
|
||||
RUN if [ ${INSTALL_ZIP_ARCHIVE} = true ]; then \
|
||||
# Install the zip extension
|
||||
pecl install zip && \
|
||||
docker-php-ext-enable zip \
|
||||
docker-php-ext-install zip \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
# bcmath:
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_BCMATH=false
|
||||
RUN if [ ${INSTALL_BCMATH} = true ]; then \
|
||||
# Install the bcmath extension
|
||||
docker-php-ext-install bcmath \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
@ -95,6 +127,16 @@ RUN if [ ${INSTALL_MEMCACHED} = true ]; then \
|
||||
&& docker-php-ext-enable memcached \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
# Exif:
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_EXIF=false
|
||||
RUN if [ ${INSTALL_EXIF} = true ]; then \
|
||||
# Enable Exif PHP extensions requirements
|
||||
docker-php-ext-install exif \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
# PHP Aerospike:
|
||||
#####################################
|
||||
@ -115,15 +157,16 @@ RUN if [ ${INSTALL_AEROSPIKE_EXTENSION} = true ]; then \
|
||||
&& make install \
|
||||
) \
|
||||
&& rm /tmp/aerospike-client-php.tar.gz \
|
||||
&& docker-php-ext-enable aerospike \
|
||||
;fi
|
||||
|
||||
#####################################
|
||||
# Opcache:
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_OPCACHE=false
|
||||
RUN if [ ${INSTALL_OPCACHE} = true ]; then \
|
||||
docker-php-ext-install opcache && \
|
||||
docker-php-ext-enable opcache \
|
||||
docker-php-ext-install opcache \
|
||||
;fi
|
||||
|
||||
# Copy opcache configuration
|
||||
@ -132,6 +175,7 @@ COPY ./opcache.ini /usr/local/etc/php/conf.d/opcache.ini
|
||||
#####################################
|
||||
# Mysqli Modifications:
|
||||
#####################################
|
||||
|
||||
ARG INSTALL_MYSQLI=false
|
||||
RUN if [ ${INSTALL_MYSQLI} = true ]; then \
|
||||
docker-php-ext-install mysqli \
|
||||
@ -150,8 +194,8 @@ RUN if [ ${INSTALL_TOKENIZER} = true ]; then \
|
||||
# Human Language and Character Encoding Support:
|
||||
#####################################
|
||||
|
||||
ARG INTL=false
|
||||
RUN if [ ${INTL} = true ]; then \
|
||||
ARG INSTALL_INTL=false
|
||||
RUN if [ ${INSTALL_INTL} = true ]; then \
|
||||
# Install intl and requirements
|
||||
apt-get install -y zlib1g-dev libicu-dev g++ && \
|
||||
docker-php-ext-configure intl && \
|
||||
@ -162,8 +206,8 @@ RUN if [ ${INTL} = true ]; then \
|
||||
# GHOSTSCRIPT:
|
||||
#####################################
|
||||
|
||||
ARG GHOSTSCRIPT=false
|
||||
RUN if [ ${GHOSTSCRIPT} = true ]; then \
|
||||
ARG INSTALL_GHOSTSCRIPT=false
|
||||
RUN if [ ${INSTALL_GHOSTSCRIPT} = true ]; then \
|
||||
# Install the ghostscript extension
|
||||
# for PDF editing
|
||||
apt-get -y update \
|
||||
@ -181,7 +225,7 @@ RUN if [ ${GHOSTSCRIPT} = true ]; then \
|
||||
ADD ./laravel.ini /usr/local/etc/php/conf.d
|
||||
ADD ./laravel.pool.conf /usr/local/etc/php-fpm.d/
|
||||
|
||||
RUN rm -r /var/lib/apt/lists/*
|
||||
#RUN rm -r /var/lib/apt/lists/*
|
||||
|
||||
RUN usermod -u 1000 www-data
|
||||
|
||||
|
58 php-fpm/mysql.ini (new file)
@@ -0,0 +1,58 @@
[MySQL]
|
||||
; Allow accessing, from PHP's perspective, local files with LOAD DATA statements
|
||||
; http://php.net/mysql.allow_local_infile
|
||||
mysql.allow_local_infile = On
|
||||
|
||||
; Allow or prevent persistent links.
|
||||
; http://php.net/mysql.allow-persistent
|
||||
mysql.allow_persistent = On
|
||||
|
||||
; If mysqlnd is used: Number of cache slots for the internal result set cache
|
||||
; http://php.net/mysql.cache_size
|
||||
mysql.cache_size = 2000
|
||||
|
||||
; Maximum number of persistent links. -1 means no limit.
|
||||
; http://php.net/mysql.max-persistent
|
||||
mysql.max_persistent = -1
|
||||
|
||||
; Maximum number of links (persistent + non-persistent). -1 means no limit.
|
||||
; http://php.net/mysql.max-links
|
||||
mysql.max_links = -1
|
||||
|
||||
; Default port number for mysql_connect(). If unset, mysql_connect() will use
|
||||
; the $MYSQL_TCP_PORT or the mysql-tcp entry in /etc/services or the
|
||||
; compile-time value defined MYSQL_PORT (in that order). Win32 will only look
|
||||
; at MYSQL_PORT.
|
||||
; http://php.net/mysql.default-port
|
||||
mysql.default_port =
|
||||
|
||||
; Default socket name for local MySQL connects. If empty, uses the built-in
|
||||
; MySQL defaults.
|
||||
; http://php.net/mysql.default-socket
|
||||
mysql.default_socket =
|
||||
|
||||
; Default host for mysql_connect() (doesn't apply in safe mode).
|
||||
; http://php.net/mysql.default-host
|
||||
mysql.default_host =
|
||||
|
||||
; Default user for mysql_connect() (doesn't apply in safe mode).
|
||||
; http://php.net/mysql.default-user
|
||||
mysql.default_user =
|
||||
|
||||
; Default password for mysql_connect() (doesn't apply in safe mode).
|
||||
; Note that it is generally a *bad* idea to store passwords in this file.
|
||||
; *Any* user with PHP access can run 'echo get_cfg_var("mysql.default_password")
|
||||
; and reveal this password! And of course, any users with read access to this
|
||||
; file will be able to reveal the password as well.
|
||||
; http://php.net/mysql.default-password
|
||||
mysql.default_password =
|
||||
|
||||
; Maximum time (in seconds) for connect timeout. -1 means no limit
|
||||
; http://php.net/mysql.connect-timeout
|
||||
mysql.connect_timeout = 60
|
||||
|
||||
; Trace mode. When trace_mode is active (=On), warnings for table/index scans and
|
||||
; SQL-Errors will be displayed.
|
||||
; http://php.net/mysql.trace-mode
|
||||
mysql.trace_mode = Off
|
||||
|
@@ -9,6 +9,8 @@

FROM nielsvdoorn/laravel-supervisor

MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>

#
#--------------------------------------------------------------------------
# Optional Supervisord Configuration
@@ -2,7 +2,7 @@
nodaemon=true
[program:laravel-worker]
process_name=%(program_name)s_%(process_num)02d
command=php /var/www/laravel/artisan queue:work --sleep=3 --tries=3 --daemon
command=php /var/www/artisan queue:work --sleep=3 --tries=3 --daemon
autostart=true
autorestart=true
numprocs=8
@@ -1,5 +1,7 @@
FROM mdillon/postgis:latest

MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>

CMD ["postgres"]

EXPOSE 5432
@@ -1,5 +1,7 @@
FROM rabbitmq

MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>

RUN rabbitmq-plugins enable --offline rabbitmq_management

EXPOSE 15671 15672
29 varnish/Dockerfile (new file)
@@ -0,0 +1,29 @@
FROM debian:latest

MAINTAINER ZeroC0D3 Team <zeroc0d3.team@gmail.com>

# Set Environment Variables
ENV DEBIAN_FRONTEND noninteractive

# Install Dependencies
RUN apt-get update && apt-get install -y apt-utils && apt-get upgrade -y
RUN mkdir /home/site && mkdir /home/site/cache
RUN apt-get install -y varnish
RUN rm -rf /var/lib/apt/lists/*

# Setting Configurations
ENV VARNISH_CONFIG /etc/varnish/default.vcl
ENV CACHE_SIZE 128m
ENV VARNISHD_PARAMS -p default_ttl=3600 -p default_grace=3600
ENV VARNISH_PORT 6081
ENV BACKEND_HOST localhost
ENV BACKEND_PORT 80

ADD default.vcl /etc/varnish/default.vcl
ADD start.sh /etc/varnish/start.sh

RUN chmod +x /etc/varnish/start.sh

CMD ["/etc/varnish/start.sh"]

EXPOSE 8080
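A rough way to exercise the image; the names, port mapping and the exact behaviour of start.sh (not shown here) are assumptions:

```bash
docker build -t laradock_varnish ./varnish
docker run -d --name varnish \
  -e BACKEND_HOST=nginx -e BACKEND_PORT=80 \
  -p 6081:6081 \
  laradock_varnish

# default.vcl (below) sets an X-Cache header in vcl_deliver, so a repeated
# request for the same URL should flip from MISS to HIT:
curl -sI http://localhost:6081/ | grep X-Cache
```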
415 varnish/default.vcl (new file)
@@ -0,0 +1,415 @@
vcl 4.0;
|
||||
# Based on: https://github.com/mattiasgeniar/varnish-4.0-configuration-templates/blob/master/default.vcl
|
||||
|
||||
import std;
|
||||
import directors;
|
||||
|
||||
backend server1 { # Define one backend
|
||||
.host = "${BACKEND_HOST}"; # IP or Hostname of backend
|
||||
.port = "${BACKEND_PORT}"; # Port Apache or whatever is listening
|
||||
.max_connections = 300; # That's it
|
||||
|
||||
.probe = {
|
||||
#.url = "/"; # short easy way (GET /)
|
||||
# We prefer to only do a HEAD /
|
||||
.request =
|
||||
"HEAD / HTTP/1.1"
|
||||
"Host: ${BACKEND_HOST}"
|
||||
"Connection: close"
|
||||
"User-Agent: Varnish Health Probe";
|
||||
|
||||
.interval = 5s; # check the health of each backend every 5 seconds
|
||||
.timeout = 1s; # timing out after 1 second.
|
||||
.window = 5; # If 3 out of the last 5 polls succeeded the backend is considered healthy, otherwise it will be marked as sick
|
||||
.threshold = 3;
|
||||
}
|
||||
|
||||
.first_byte_timeout = 300s; # How long to wait before we receive a first byte from our backend?
|
||||
.connect_timeout = 5s; # How long to wait for a backend connection?
|
||||
.between_bytes_timeout = 2s; # How long to wait between bytes received from our backend?
|
||||
}
|
||||
|
||||
acl purge {
|
||||
# ACL we'll use later to allow purges
|
||||
"localhost";
|
||||
"127.0.0.1";
|
||||
"::1";
|
||||
}
|
||||
|
||||
#acl editors {
|
||||
# # ACL to honor the "Cache-Control: no-cache" header to force a refresh but only from selected IPs
|
||||
# "localhost";
|
||||
# "127.0.0.1";
|
||||
# "::1";
|
||||
#}
|
||||
|
||||
sub vcl_init {
|
||||
# Called when VCL is loaded, before any requests pass through it.
|
||||
# Typically used to initialize VMODs.
|
||||
|
||||
new vdir = directors.round_robin();
|
||||
vdir.add_backend(server1);
|
||||
# vdir.add_backend(servern);
|
||||
}
|
||||
|
||||
sub vcl_recv {
|
||||
# Called at the beginning of a request, after the complete request has been received and parsed.
|
||||
# Its purpose is to decide whether or not to serve the request, how to do it, and, if applicable,
|
||||
# which backend to use.
|
||||
# also used to modify the request
|
||||
|
||||
set req.backend_hint = vdir.backend(); # send all traffic to the vdir director
|
||||
|
||||
# Normalize the header, remove the port (in case you're testing this on various TCP ports)
|
||||
set req.http.Host = regsub(req.http.Host, ":[0-9]+", "");
|
||||
|
||||
# Remove the proxy header (see https://httpoxy.org/#mitigate-varnish)
|
||||
unset req.http.proxy;
|
||||
|
||||
# Normalize the query arguments
|
||||
set req.url = std.querysort(req.url);
|
||||
|
||||
# Allow purging
|
||||
if (req.method == "PURGE") {
|
||||
if (!client.ip ~ purge) { # purge is the ACL defined at the beginning
|
||||
# Not from an allowed IP? Then die with an error.
|
||||
return (synth(405, "This IP is not allowed to send PURGE requests."));
|
||||
}
|
||||
# If you got to this stage (and didn't error out above), purge the cached result
|
||||
return (purge);
|
||||
}
|
||||
|
||||
# Only deal with "normal" types
|
||||
if (req.method != "GET" &&
|
||||
req.method != "HEAD" &&
|
||||
req.method != "PUT" &&
|
||||
req.method != "POST" &&
|
||||
req.method != "TRACE" &&
|
||||
req.method != "OPTIONS" &&
|
||||
req.method != "PATCH" &&
|
||||
req.method != "DELETE") {
|
||||
# Non-RFC2616 or CONNECT which is weird.
|
||||
return (pipe);
|
||||
}
|
||||
|
||||
# Implementing websocket support (https://www.varnish-cache.org/docs/4.0/users-guide/vcl-example-websockets.html)
|
||||
if (req.http.Upgrade ~ "(?i)websocket") {
|
||||
return (pipe);
|
||||
}
|
||||
|
||||
# Only cache GET or HEAD requests. This makes sure the POST requests are always passed.
|
||||
if (req.method != "GET" && req.method != "HEAD") {
|
||||
return (pass);
|
||||
}
|
||||
|
||||
# Some generic URL manipulation, useful for all templates that follow
|
||||
# First remove the Google Analytics added parameters, useless for our backend
|
||||
if (req.url ~ "(\?|&)(utm_source|utm_medium|utm_campaign|utm_content|gclid|cx|ie|cof|siteurl)=") {
|
||||
set req.url = regsuball(req.url, "&(utm_source|utm_medium|utm_campaign|utm_content|gclid|cx|ie|cof|siteurl)=([A-z0-9_\-\.%25]+)", "");
|
||||
set req.url = regsuball(req.url, "\?(utm_source|utm_medium|utm_campaign|utm_content|gclid|cx|ie|cof|siteurl)=([A-z0-9_\-\.%25]+)", "?");
|
||||
set req.url = regsub(req.url, "\?&", "?");
|
||||
set req.url = regsub(req.url, "\?$", "");
|
||||
}
|
||||
|
||||
# Strip hash, server doesn't need it.
|
||||
if (req.url ~ "\#") {
|
||||
set req.url = regsub(req.url, "\#.*$", "");
|
||||
}
|
||||
|
||||
# Strip a trailing ? if it exists
|
||||
if (req.url ~ "\?$") {
|
||||
set req.url = regsub(req.url, "\?$", "");
|
||||
}
|
||||
|
||||
# Some generic cookie manipulation, useful for all templates that follow
|
||||
# Remove the "has_js" cookie
|
||||
set req.http.Cookie = regsuball(req.http.Cookie, "has_js=[^;]+(; )?", "");
|
||||
|
||||
# Remove any Google Analytics based cookies
|
||||
set req.http.Cookie = regsuball(req.http.Cookie, "__utm.=[^;]+(; )?", "");
|
||||
set req.http.Cookie = regsuball(req.http.Cookie, "_ga=[^;]+(; )?", "");
|
||||
set req.http.Cookie = regsuball(req.http.Cookie, "_gat=[^;]+(; )?", "");
|
||||
set req.http.Cookie = regsuball(req.http.Cookie, "utmctr=[^;]+(; )?", "");
|
||||
set req.http.Cookie = regsuball(req.http.Cookie, "utmcmd.=[^;]+(; )?", "");
|
||||
set req.http.Cookie = regsuball(req.http.Cookie, "utmccn.=[^;]+(; )?", "");
|
||||
|
||||
# Remove DoubleClick offensive cookies
|
||||
set req.http.Cookie = regsuball(req.http.Cookie, "__gads=[^;]+(; )?", "");
|
||||
|
||||
# Remove the Quant Capital cookies (added by some plugin, all __qca)
|
||||
set req.http.Cookie = regsuball(req.http.Cookie, "__qc.=[^;]+(; )?", "");
|
||||
|
||||
# Remove the AddThis cookies
|
||||
set req.http.Cookie = regsuball(req.http.Cookie, "__atuv.=[^;]+(; )?", "");
|
||||
|
||||
# Remove a ";" prefix in the cookie if present
|
||||
set req.http.Cookie = regsuball(req.http.Cookie, "^;\s*", "");
|
||||
|
||||
# Are there cookies left with only spaces or that are empty?
|
||||
if (req.http.cookie ~ "^\s*$") {
|
||||
unset req.http.cookie;
|
||||
}
|
||||
|
||||
if (req.http.Cache-Control ~ "(?i)no-cache") {
|
||||
#if (req.http.Cache-Control ~ "(?i)no-cache" && client.ip ~ editors) { # create the acl editors if you want to restrict the Ctrl-F5
|
||||
# http://varnish.projects.linpro.no/wiki/VCLExampleEnableForceRefresh
|
||||
# Ignore requests via proxy caches and badly behaved crawlers
|
||||
# like msnbot that send no-cache with every request.
|
||||
if (! (req.http.Via || req.http.User-Agent ~ "(?i)bot" || req.http.X-Purge)) {
|
||||
#set req.hash_always_miss = true; # Doesn't seem to refresh the object in the cache
|
||||
return(purge); # Couple this with restart in vcl_purge and X-Purge header to avoid loops
|
||||
}
|
||||
}
|
||||
|
||||
# Large static files are delivered directly to the end-user without
|
||||
# waiting for Varnish to fully read the file first.
|
||||
# Varnish 4 fully supports Streaming, so set do_stream in vcl_backend_response()
|
||||
if (req.url ~ "^[^?]*\.(7z|avi|bz2|flac|flv|gz|mka|mkv|mov|mp3|mp4|mpeg|mpg|ogg|ogm|opus|rar|tar|tgz|tbz|txz|wav|webm|xz|zip)(\?.*)?$") {
|
||||
unset req.http.Cookie;
|
||||
return (hash);
|
||||
}
|
||||
|
||||
# Remove all cookies for static files
|
||||
# A valid discussion could be held on this line: do you really need to cache static files that don't cause load? Only if you have memory left.
|
||||
# Sure, there's disk I/O, but chances are your OS will already have these files in their buffers (thus memory).
|
||||
# Before you blindly enable this, have a read here: https://ma.ttias.be/stop-caching-static-files/
|
||||
if (req.url ~ "^[^?]*\.(7z|avi|bmp|bz2|css|csv|doc|docx|eot|flac|flv|gif|gz|ico|jpeg|jpg|js|less|mka|mkv|mov|mp3|mp4|mpeg|mpg|odt|otf|ogg|ogm|opus|pdf|png|ppt|pptx|rar|rtf|svg|svgz|swf|tar|tbz|tgz|ttf|txt|txz|wav|webm|webp|woff|woff2|xls|xlsx|xml|xz|zip)(\?.*)?$") {
|
||||
unset req.http.Cookie;
|
||||
return (hash);
|
||||
}
|
||||
|
||||
# Send Surrogate-Capability headers to announce ESI support to backend
|
||||
set req.http.Surrogate-Capability = "key=ESI/1.0";
|
||||
|
||||
if (req.http.Authorization) {
|
||||
# Not cacheable by default
|
||||
return (pass);
|
||||
}
|
||||
|
||||
return (hash);
|
||||
}
|
||||
|
||||
sub vcl_pipe {
|
||||
# Called upon entering pipe mode.
|
||||
# In this mode, the request is passed on to the backend, and any further data from both the client
|
||||
# and backend is passed on unaltered until either end closes the connection. Basically, Varnish will
|
||||
# degrade into a simple TCP proxy, shuffling bytes back and forth. For a connection in pipe mode,
|
||||
# no other VCL subroutine will ever get called after vcl_pipe.
|
||||
|
||||
# Note that only the first request to the backend will have
|
||||
# X-Forwarded-For set. If you use X-Forwarded-For and want to
|
||||
# have it set for all requests, make sure to have:
|
||||
# set bereq.http.connection = "close";
|
||||
# here. It is not set by default as it might break some broken web
|
||||
# applications, like IIS with NTLM authentication.
|
||||
|
||||
# set bereq.http.Connection = "Close";
|
||||
|
||||
# Implementing websocket support (https://www.varnish-cache.org/docs/4.0/users-guide/vcl-example-websockets.html)
|
||||
if (req.http.upgrade) {
|
||||
set bereq.http.upgrade = req.http.upgrade;
|
||||
}
|
||||
|
||||
return (pipe);
|
||||
}
|
||||
|
||||
sub vcl_pass {
|
||||
# Called upon entering pass mode. In this mode, the request is passed on to the backend, and the
|
||||
# backend's response is passed on to the client, but is not entered into the cache. Subsequent
|
||||
# requests submitted over the same client connection are handled normally.
|
||||
|
||||
# return (pass);
|
||||
}
|
||||
|
||||
# The data on which the hashing will take place
|
||||
sub vcl_hash {
|
||||
# Called after vcl_recv to create a hash value for the request. This is used as a key
|
||||
# to look up the object in Varnish.
|
||||
|
||||
hash_data(req.url);
|
||||
|
||||
if (req.http.host) {
|
||||
hash_data(req.http.host);
|
||||
} else {
|
||||
hash_data(server.ip);
|
||||
}
|
||||
|
||||
# hash cookies for requests that have them
|
||||
if (req.http.Cookie) {
|
||||
hash_data(req.http.Cookie);
|
||||
}
|
||||
}
|
||||
|
||||
sub vcl_hit {
|
||||
# Called when a cache lookup is successful.
|
||||
|
||||
if (obj.ttl >= 0s) {
|
||||
# A pure unadulterated hit, deliver it
|
||||
return (deliver);
|
||||
}
|
||||
|
||||
# https://www.varnish-cache.org/docs/trunk/users-guide/vcl-grace.html
|
||||
# When several clients are requesting the same page Varnish will send one request to the backend and place the others on hold while fetching one copy from the backend. In some products this is called request coalescing and Varnish does this automatically.
|
||||
# If you are serving thousands of hits per second the queue of waiting requests can get huge. There are two potential problems - one is a thundering herd problem - suddenly releasing a thousand threads to serve content might send the load sky high. Secondly - nobody likes to wait. To deal with this we can instruct Varnish to keep the objects in cache beyond their TTL and to serve the waiting requests somewhat stale content.
|
||||
|
||||
# if (!std.healthy(req.backend_hint) && (obj.ttl + obj.grace > 0s)) {
|
||||
# return (deliver);
|
||||
# } else {
|
||||
# return (fetch);
|
||||
# }
|
||||
|
||||
# We have no fresh fish. Lets look at the stale ones.
|
||||
if (std.healthy(req.backend_hint)) {
|
||||
# Backend is healthy. Limit age to 10s.
|
||||
if (obj.ttl + 10s > 0s) {
|
||||
#set req.http.grace = "normal(limited)";
|
||||
return (deliver);
|
||||
} else {
|
||||
# No candidate for grace. Fetch a fresh object.
|
||||
return(fetch);
|
||||
}
|
||||
} else {
|
||||
# backend is sick - use full grace
|
||||
if (obj.ttl + obj.grace > 0s) {
|
||||
#set req.http.grace = "full";
|
||||
return (deliver);
|
||||
} else {
|
||||
# no graced object.
|
||||
return (fetch);
|
||||
}
|
||||
}
|
||||
|
||||
# fetch & deliver once we get the result
|
||||
return (fetch); # Dead code, keep as a safeguard
|
||||
}
|
||||
|
||||
sub vcl_miss {
|
||||
# Called after a cache lookup if the requested document was not found in the cache. Its purpose
|
||||
# is to decide whether or not to attempt to retrieve the document from the backend, and which
|
||||
# backend to use.
|
||||
|
||||
return (fetch);
|
||||
}
|
||||
|
||||
# Handle the HTTP request coming from our backend
|
||||
sub vcl_backend_response {
    # Called after the response headers have been successfully retrieved from the backend.

    # Parse ESI requests and remove the Surrogate-Control header
    if (beresp.http.Surrogate-Control ~ "ESI/1.0") {
        unset beresp.http.Surrogate-Control;
        set beresp.do_esi = true;
    }

    # Enable cache for all static files
    # The same argument as the static caches from above: monitor your cache size, if you get data nuked out of it, consider giving up the static file cache.
    # Before you blindly enable this, have a read here: https://ma.ttias.be/stop-caching-static-files/
    if (bereq.url ~ "^[^?]*\.(7z|avi|bmp|bz2|css|csv|doc|docx|eot|flac|flv|gif|gz|ico|jpeg|jpg|js|less|mka|mkv|mov|mp3|mp4|mpeg|mpg|odt|otf|ogg|ogm|opus|pdf|png|ppt|pptx|rar|rtf|svg|svgz|swf|tar|tbz|tgz|ttf|txt|txz|wav|webm|webp|woff|woff2|xls|xlsx|xml|xz|zip)(\?.*)?$") {
        unset beresp.http.set-cookie;
    }

    # Large static files are delivered directly to the end-user without
    # waiting for Varnish to fully read the file first.
    # Varnish 4 fully supports Streaming, so use streaming here to avoid locking.
    if (bereq.url ~ "^[^?]*\.(7z|avi|bz2|flac|flv|gz|mka|mkv|mov|mp3|mp4|mpeg|mpg|ogg|ogm|opus|rar|tar|tgz|tbz|txz|wav|webm|xz|zip)(\?.*)?$") {
        unset beresp.http.set-cookie;
        set beresp.do_stream = true; # Check memory usage: it'll grow in fetch_chunksize blocks (128k by default) if the backend doesn't send a Content-Length header, so only enable it for big objects
    }

    # Sometimes, a 301 or 302 redirect formed via Apache's mod_rewrite can mess with the HTTP port that is being passed along.
    # This often happens with simple rewrite rules in a scenario where Varnish runs on :80 and Apache on :8080 on the same box.
    # A redirect can then often redirect the end-user to a URL on :8080, where it should be :80.
    # This may need finetuning on your setup.
    #
    # To prevent accidental replace, we only filter the 301/302 redirects for now.
    if (beresp.status == 301 || beresp.status == 302) {
        set beresp.http.Location = regsub(beresp.http.Location, ":[0-9]+", "");
    }

    # Set a 2min cache if no TTL was set by the backend
    if (beresp.ttl <= 0s || beresp.http.Set-Cookie || beresp.http.Vary == "*") {
        set beresp.ttl = 120s; # Important, you shouldn't rely on this, SET YOUR HEADERS in the backend
        set beresp.uncacheable = true;
        return (deliver);
    }

    # Don't cache 50x responses
    if (beresp.status == 500 || beresp.status == 502 || beresp.status == 503 || beresp.status == 504) {
        return (abandon);
    }

    # Allow stale content, in case the backend goes down.
    # make Varnish keep all objects for 6 hours beyond their TTL
    set beresp.grace = 6h;

    return (deliver);
}

# The routine when we deliver the HTTP request to the user
# Last chance to modify headers that are sent to the client
sub vcl_deliver {
    # Called before a cached object is delivered to the client.

    if (obj.hits > 0) { # Add debug header to see if it's a HIT/MISS and the number of hits, disable when not needed
        set resp.http.X-Cache = "HIT";
    } else {
        set resp.http.X-Cache = "MISS";
    }

    # Please note that obj.hits behaviour changed in 4.0, now it counts per objecthead, not per object
    # and obj.hits may not be reset in some cases where bans are in use. See bug 1492 for details.
    # So take hits with a grain of salt
    set resp.http.X-Cache-Hits = obj.hits;

    # Remove some headers: PHP version
    unset resp.http.X-Powered-By;

    # Remove some headers: Apache version & OS
    unset resp.http.Server;
    unset resp.http.X-Drupal-Cache;
    unset resp.http.X-Varnish;
    unset resp.http.Via;
    unset resp.http.Link;
    unset resp.http.X-Generator;
    unset resp.http.X-Debug-Token;
    unset resp.http.X-Debug-Token-Link;
    set resp.http.Server = "${VARNISH_SERVER}";
    set resp.http.X-Powered-By = "MSI<surya.kejawen@gmail.com>";

    return (deliver);
}

sub vcl_purge {
    # Only handle actual PURGE HTTP methods, everything else is discarded
    if (req.method != "PURGE") {
        # restart request
        set req.http.X-Purge = "Yes";
        return (restart);
    }
}

sub vcl_synth {
    if (resp.status == 720) {
        # We use this special error status 720 to force redirects with 301 (permanent) redirects
        # To use this, call the following from anywhere in vcl_recv: return (synth(720, "http://host/new.html"));
        set resp.http.Location = resp.reason;
        set resp.status = 301;
        return (deliver);
    } elseif (resp.status == 721) {
        # And we use error status 721 to force redirects with a 302 (temporary) redirect
        # To use this, call the following from anywhere in vcl_recv: return (synth(721, "http://host/new.html"));
        set resp.http.Location = resp.reason;
        set resp.status = 302;
        return (deliver);
    }

    return (deliver);
}

sub vcl_fini {
    # Called when VCL is discarded only after all requests have exited the VCL.
    # Typically used to clean up VMODs.

    return (ok);
}
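A quick way to sanity-check this VCL once the Varnish container is running is to watch the debug headers set in vcl_deliver above and to try a PURGE. The sketch below is only an illustration: it assumes Varnish is reachable on localhost:6081 (the real port comes from the VARNISH_PORT variable used by start.sh below) and that your vcl_recv permits PURGE from your address.

    # Expect "X-Cache: MISS" on the first request and "HIT" (with a growing X-Cache-Hits) afterwards
    curl -sI http://localhost:6081/ | grep -iE 'x-cache|x-cache-hits'

    # Ask Varnish to drop the cached object for a URL (only works if vcl_recv allows PURGE)
    curl -sI -X PURGE http://localhost:6081/some/page | head -n 1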
17
varnish/start.sh
Normal file
@ -0,0 +1,17 @@
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
for name in BACKEND_PORT BACKEND_HOST VARNISH_SERVER
|
||||
do
|
||||
eval value=\$$name
|
||||
sed -i "s|\${${name}}|${value}|g" /etc/varnish/default.vcl
|
||||
done
|
||||
|
||||
exec bash -c \
|
||||
"exec varnishd \
|
||||
-a :$VARNISH_PORT \
|
||||
-T localhost:6082 \
|
||||
-F -u varnish \
|
||||
-f $VARNISH_CONFIG \
|
||||
-s malloc,$CACHE_SIZE \
|
||||
$VARNISHD_PARAMS"
|
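The loop in start.sh is a small templating step: for each listed variable it replaces the literal ${NAME} placeholder in default.vcl with the value taken from the container environment before varnishd starts. A minimal stand-alone reproduction of that substitution, using a scratch copy of the file and made-up values purely for illustration:

    # Illustrative only: mimic the substitution against a throwaway copy of the VCL
    cp /etc/varnish/default.vcl /tmp/default.vcl
    export BACKEND_HOST=workspace BACKEND_PORT=80 VARNISH_SERVER=varnish   # example values, not defaults

    for name in BACKEND_PORT BACKEND_HOST VARNISH_SERVER; do
        eval value=\$$name
        sed -i "s|\${${name}}|${value}|g" /tmp/default.vcl
    done

    grep -n 'workspace\|varnish' /tmp/default.vcl   # placeholders are now concrete values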
@ -19,7 +19,7 @@ MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>
# Mandatory Software's Installation
#--------------------------------------------------------------------------
#
# Mandatory Software's such as ("php7.0-cli", "git", "vim", ....) are
# Mandatory Software's such as ("php5.6-cli", "git", "vim", ....) are
# installed on the base image 'laradock/workspace' image. If you want
# to add more Software's or remove existing one, you need to edit the
# base image (https://github.com/LaraDock/workspace).
@ -60,8 +60,8 @@ RUN if [ ${INSTALL_SOAP} = true ]; then \
    # Install the PHP SOAP extension
    apt-get -y update && \
    add-apt-repository -y ppa:ondrej/php && \
    apt-get -y install libxml2-dev php7.0-soap && \
    echo "extension=soap.so" >> /etc/php/7.0/cli/conf.d/40-soap.ini \
    apt-get -y install libxml2-dev php5.6-soap && \
    echo "extension=soap.so" >> /etc/php/5.6/cli/conf.d/40-soap.ini \
;fi

#####################################
@ -128,12 +128,12 @@ ARG INSTALL_XDEBUG=false
RUN if [ ${INSTALL_XDEBUG} = true ]; then \
    # Load the xdebug extension only with phpunit commands
    apt-get update && \
    apt-get install -y --force-yes php7.0-xdebug && \
    sed -i 's/^/;/g' /etc/php/7.0/cli/conf.d/20-xdebug.ini && \
    apt-get install -y --force-yes php5.6-xdebug && \
    sed -i 's/^/;/g' /etc/php/5.6/cli/conf.d/20-xdebug.ini && \
    echo "alias phpunit='php -dzend_extension=xdebug.so /var/www/vendor/bin/phpunit'" >> ~/.bashrc \
;fi
# ADD for REMOTE debugging
COPY ./xdebug.ini /etc/php/7.0/cli/conf.d/xdebug.ini
COPY ./xdebug.ini /etc/php/5.6/cli/conf.d/xdebug.ini

#####################################
# ssh:
@ -164,7 +164,7 @@ ENV INSTALL_MONGO ${INSTALL_MONGO}
RUN if [ ${INSTALL_MONGO} = true ]; then \
    # Install the mongodb extension
    pecl install mongodb && \
    echo "extension=mongodb.so" >> /etc/php/7.0/cli/conf.d/30-mongodb.ini \
    echo "extension=mongodb.so" >> /etc/php/5.6/cli/conf.d/30-mongodb.ini \
;fi

#####################################
@ -260,7 +260,7 @@ ARG INSTALL_AEROSPIKE_EXTENSION=true
ENV INSTALL_AEROSPIKE_EXTENSION ${INSTALL_AEROSPIKE_EXTENSION}

# Copy aerospike configration for remote debugging
COPY ./aerospike.ini /etc/php/7.0/cli/conf.d/aerospike.ini
COPY ./aerospike.ini /etc/php/5.6/cli/conf.d/aerospike.ini

RUN if [ ${INSTALL_AEROSPIKE_EXTENSION} = true ]; then \
    # Install the php aerospike extension
@ -277,7 +277,7 @@ RUN if [ ${INSTALL_AEROSPIKE_EXTENSION} = true ]; then \
;fi

RUN if [ ${INSTALL_AEROSPIKE_EXTENSION} = false ]; then \
    rm /etc/php/7.0/cli/conf.d/aerospike.ini \
    rm /etc/php/5.6/cli/conf.d/aerospike.ini \
;fi

#####################################
@ -294,7 +294,7 @@ RUN if [ ${INSTALL_V8JS_EXTENSION} = true ]; then \
    && apt-get update \
    && apt-get install -y php-dev php-pear libv8-5.4 \
    && pecl install v8js \
    && echo "extension=v8js.so" >> /etc/php/7.0/cli/php.ini \
    && echo "extension=v8js.so" >> /etc/php/5.6/cli/php.ini \
;fi

#####################################
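These hunks swap the PHP version baked into workspace/Dockerfile from 7.0 to 5.6, so the image has to be rebuilt for the new package names and ini paths to take effect. A hedged sketch of that rebuild, assuming the compose service is named workspace in your docker-compose.yml:

    # Rebuild and restart the workspace container so the php5.6 packages and conf.d paths are picked up.
    # Drop --no-cache if you trust the layer cache.
    docker-compose build --no-cache workspace
    docker-compose up -d workspace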
426
workspace/Dockerfile-70
Normal file
@ -0,0 +1,426 @@
#
#--------------------------------------------------------------------------
# Image Setup
#--------------------------------------------------------------------------
#
# To edit the 'workspace' base Image, visit its repository on Github
# https://github.com/LaraDock/workspace
#
# To change its version, see the available Tags on the Docker Hub:
# https://hub.docker.com/r/laradock/workspace/tags/
#

FROM laradock/workspace:1.3

MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>

#
#--------------------------------------------------------------------------
# Mandatory Software's Installation
#--------------------------------------------------------------------------
#
# Mandatory Software's such as ("php7.0-cli", "git", "vim", ....) are
# installed on the base image 'laradock/workspace' image. If you want
# to add more Software's or remove existing one, you need to edit the
# base image (https://github.com/LaraDock/workspace).
#

#
#--------------------------------------------------------------------------
# Optional Software's Installation
#--------------------------------------------------------------------------
#
# Optional Software's will only be installed if you set them to `true`
# in the `docker-compose.yml` before the build.
# Example:
#   - INSTALL_NODE=false
#   - ...
#

#####################################
# Non-Root User:
#####################################

# Add a non-root user to prevent files being created with root permissions on host machine.
ARG PUID=1000
ARG PGID=1000
RUN groupadd -g $PGID laradock && \
    useradd -u $PUID -g laradock -m laradock

#####################################
# SOAP:
#####################################
USER root

ARG INSTALL_SOAP=false
ENV INSTALL_SOAP ${INSTALL_SOAP}

RUN if [ ${INSTALL_SOAP} = true ]; then \
    # Install the PHP SOAP extension
    apt-get -y update && \
    add-apt-repository -y ppa:ondrej/php && \
    apt-get -y install libxml2-dev php7.0-soap \
;fi

#####################################
# Set Timezone
#####################################

ARG TZ=UTC
ENV TZ ${TZ}
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

#####################################
# Composer:
#####################################

# Add the composer.json
COPY ./composer.json /home/laradock/.composer/composer.json

# Make sure that ~/.composer belongs to laradock
RUN chown -R laradock:laradock /home/laradock/.composer
USER laradock

# Check if global install need to be ran
ARG COMPOSER_GLOBAL_INSTALL=false
ENV COMPOSER_GLOBAL_INSTALL ${COMPOSER_GLOBAL_INSTALL}
RUN if [ ${COMPOSER_GLOBAL_INSTALL} = true ]; then \
    # run the install
    composer global install \
;fi

# Export composer vendor path
RUN echo "" >> ~/.bashrc && \
    echo 'export PATH="~/.composer/vendor/bin:$PATH"' >> ~/.bashrc

#####################################
# Crontab
#####################################
USER root

COPY ./crontab /etc/cron.d
RUN chmod -R 644 /etc/cron.d

#####################################
# User Aliases
#####################################

USER laradock
COPY ./aliases.sh /home/laradock/aliases.sh
RUN echo "" >> ~/.bashrc && \
    echo "# Load Custom Aliases" >> ~/.bashrc && \
    echo "source /home/laradock/aliases.sh" >> ~/.bashrc && \
    echo "" >> ~/.bashrc

USER root
RUN echo "" >> ~/.bashrc && \
    echo "# Load Custom Aliases" >> ~/.bashrc && \
    echo "source /home/laradock/aliases.sh" >> ~/.bashrc && \
    echo "" >> ~/.bashrc

#####################################
# xDebug:
#####################################

ARG INSTALL_XDEBUG=false
RUN if [ ${INSTALL_XDEBUG} = true ]; then \
    # Load the xdebug extension only with phpunit commands
    apt-get update && \
    apt-get install -y --force-yes php7.0-xdebug && \
    sed -i 's/^/;/g' /etc/php/7.0/cli/conf.d/20-xdebug.ini && \
    echo "alias phpunit='php -dzend_extension=xdebug.so /var/www/vendor/bin/phpunit'" >> ~/.bashrc \
;fi
# ADD for REMOTE debugging
COPY ./xdebug.ini /etc/php/7.0/cli/conf.d/xdebug.ini

#####################################
# ssh:
#####################################
ARG INSTALL_WORKSPACE_SSH=false
ENV INSTALL_WORKSPACE_SSH ${INSTALL_WORKSPACE_SSH}

ADD insecure_id_rsa /tmp/id_rsa
ADD insecure_id_rsa.pub /tmp/id_rsa.pub

RUN if [ ${INSTALL_WORKSPACE_SSH} = true ]; then \
    rm -f /etc/service/sshd/down && \
    cat /tmp/id_rsa.pub >> /root/.ssh/authorized_keys \
        && cat /tmp/id_rsa.pub >> /root/.ssh/id_rsa.pub \
        && cat /tmp/id_rsa >> /root/.ssh/id_rsa \
        && rm -f /tmp/id_rsa* \
        && chmod 644 /root/.ssh/authorized_keys /root/.ssh/id_rsa.pub \
        && chmod 400 /root/.ssh/id_rsa \
;fi

#####################################
# MongoDB:
#####################################

# Check if Mongo needs to be installed
ARG INSTALL_MONGO=false
ENV INSTALL_MONGO ${INSTALL_MONGO}
RUN if [ ${INSTALL_MONGO} = true ]; then \
    # Install the mongodb extension
    pecl install mongodb && \
    echo "extension=mongodb.so" >> /etc/php/7.0/cli/conf.d/30-mongodb.ini \
;fi

#####################################
# Drush:
#####################################
USER root
ENV DRUSH_VERSION 8.1.2
ARG INSTALL_DRUSH=false
ENV INSTALL_DRUSH ${INSTALL_DRUSH}
RUN if [ ${INSTALL_DRUSH} = true ]; then \
    # Install Drush 8 with the phar file.
    curl -fsSL -o /usr/local/bin/drush https://github.com/drush-ops/drush/releases/download/$DRUSH_VERSION/drush.phar | bash && \
    chmod +x /usr/local/bin/drush && \
    drush core-status \
;fi

USER laradock

#####################################
# Node / NVM:
#####################################

# Check if NVM needs to be installed
ARG NODE_VERSION=stable
ENV NODE_VERSION ${NODE_VERSION}
ARG INSTALL_NODE=false
ENV INSTALL_NODE ${INSTALL_NODE}
ENV NVM_DIR /home/laradock/.nvm
RUN if [ ${INSTALL_NODE} = true ]; then \
    # Install nvm (A Node Version Manager)
    curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.1/install.sh | bash && \
    . $NVM_DIR/nvm.sh && \
    nvm install ${NODE_VERSION} && \
    nvm use ${NODE_VERSION} && \
    nvm alias ${NODE_VERSION} && \
    npm install -g gulp bower vue-cli \
;fi

# Wouldn't execute when added to the RUN statement in the above block
# Source NVM when loading bash since ~/.profile isn't loaded on non-login shell
RUN if [ ${INSTALL_NODE} = true ]; then \
    echo "" >> ~/.bashrc && \
    echo 'export NVM_DIR="$HOME/.nvm"' >> ~/.bashrc && \
    echo '[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm' >> ~/.bashrc \
;fi

# Add NVM binaries to root's .bashrc
USER root

RUN if [ ${INSTALL_NODE} = true ]; then \
    echo "" >> ~/.bashrc && \
    echo 'export NVM_DIR="/home/laradock/.nvm"' >> ~/.bashrc && \
    echo '[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm' >> ~/.bashrc \
;fi

#####################################
# YARN:
#####################################

USER laradock

ARG INSTALL_YARN=false
ENV INSTALL_YARN ${INSTALL_YARN}
ARG YARN_VERSION=latest
ENV YARN_VERSION ${YARN_VERSION}

RUN if [ ${INSTALL_YARN} = true ]; then \
    [ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" && \
    if [ ${YARN_VERSION} = "latest" ]; then \
        curl -o- -L https://yarnpkg.com/install.sh | bash; \
    else \
        curl -o- -L https://yarnpkg.com/install.sh | bash -s -- --version ${YARN_VERSION}; \
    fi && \
    echo "" >> ~/.bashrc && \
    echo 'export PATH="$HOME/.yarn/bin:$PATH"' >> ~/.bashrc \
;fi

# Add YARN binaries to root's .bashrc
USER root

RUN if [ ${INSTALL_YARN} = true ]; then \
    echo "" >> ~/.bashrc && \
    echo 'export YARN_DIR="/home/laradock/.yarn"' >> ~/.bashrc && \
    echo 'export PATH="$YARN_DIR/bin:$PATH"' >> ~/.bashrc \
;fi

#####################################
# PHP Aerospike:
#####################################
USER root

ARG INSTALL_AEROSPIKE_EXTENSION=true
ENV INSTALL_AEROSPIKE_EXTENSION ${INSTALL_AEROSPIKE_EXTENSION}

# Copy aerospike configration for remote debugging
COPY ./aerospike.ini /etc/php/7.0/cli/conf.d/aerospike.ini

RUN if [ ${INSTALL_AEROSPIKE_EXTENSION} = true ]; then \
    # Install the php aerospike extension
    curl -L -o /tmp/aerospike-client-php.tar.gz "https://github.com/luciano-jr/aerospike-client-php/archive/master.tar.gz" \
    && mkdir -p aerospike-client-php \
    && tar -C aerospike-client-php -zxvf /tmp/aerospike-client-php.tar.gz --strip 1 \
    && ( \
        cd aerospike-client-php/src/aerospike \
        && phpize \
        && ./build.sh \
        && make install \
    ) \
    && rm /tmp/aerospike-client-php.tar.gz \
;fi

RUN if [ ${INSTALL_AEROSPIKE_EXTENSION} = false ]; then \
    rm /etc/php/7.0/cli/conf.d/aerospike.ini \
;fi

#####################################
# PHP V8JS:
#####################################
USER root

ARG INSTALL_V8JS_EXTENSION=false
ENV INSTALL_V8JS_EXTENSION ${INSTALL_V8JS_EXTENSION}

RUN if [ ${INSTALL_V8JS_EXTENSION} = true ]; then \
    # Install the php V8JS extension
    add-apt-repository -y ppa:pinepain/libv8-5.4 \
    && apt-get update \
    && apt-get install -y php-dev php-pear libv8-5.4 \
    && pecl install v8js \
    && echo "extension=v8js.so" >> /etc/php/7.0/cli/php.ini \
;fi

#####################################
# Non-root user : PHPUnit path
#####################################

# add ./vendor/bin to non-root user's bashrc (needed for phpunit)
USER laradock

RUN echo "" >> ~/.bashrc && \
    echo 'export PATH="/var/www/vendor/bin:$PATH"' >> ~/.bashrc

#####################################
# Laravel Artisan Alias
#####################################
USER root

RUN echo "" >> ~/.bashrc && \
    echo 'alias art="php artisan"' >> ~/.bashrc

#####################################
# Laravel Envoy:
#####################################
USER laradock

ARG INSTALL_LARAVEL_ENVOY=true
ENV INSTALL_LARAVEL_ENVOY ${INSTALL_LARAVEL_ENVOY}

RUN if [ ${INSTALL_LARAVEL_ENVOY} = true ]; then \
    # Install the Laravel Envoy
    composer global require "laravel/envoy=~1.0" \
;fi

#####################################
# Deployer:
#####################################
USER laradock

ARG INSTALL_DEPLOYER=false
ENV INSTALL_DEPLOYER ${INSTALL_DEPLOYER}

RUN if [ ${INSTALL_DEPLOYER} = true ]; then \
    # Install the Deployer
    composer global require "deployer/deployer" \
;fi

#####################################
# Linuxbrew:
#####################################
USER root

ARG INSTALL_LINUXBREW=true
ENV INSTALL_LINUXBREW ${INSTALL_LINUXBREW}

RUN if [ ${INSTALL_LINUXBREW} = true ]; then \

    # Preparation
    apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y build-essential make cmake scons curl git \
        ruby autoconf automake autoconf-archive \
        gettext libtool flex bison \
        libbz2-dev libcurl4-openssl-dev \
        libexpat-dev libncurses-dev && \

    # Install the Linuxbrew
    git clone https://github.com/Homebrew/linuxbrew.git ~/.linuxbrew && \

    echo "" >> ~/.bashrc && \
    echo 'export PKG_CONFIG_PATH"=/usr/local/lib/pkgconfig:/usr/local/lib64/pkgconfig:/usr/lib64/pkgconfig:/usr/lib/pkgconfig:/usr/lib/x86_64-linux-gnu/pkgconfig:/usr/lib64/pkgconfig:/usr/share/pkgconfig:$PKG_CONFIG_PATH"' >> ~/.bashrc && \

    # Setup linuxbrew
    echo 'export LINUXBREWHOME="$HOME/.linuxbrew"' >> ~/.bashrc && \
    echo 'export PATH="$LINUXBREWHOME/bin:$PATH"' >> ~/.bashrc && \
    echo 'export MANPATH="$LINUXBREWHOME/man:$MANPATH"' >> ~/.bashrc && \
    echo 'export PKG_CONFIG_PATH="$LINUXBREWHOME/lib64/pkgconfig:$LINUXBREWHOME/lib/pkgconfig:$PKG_CONFIG_PATH"' >> ~/.bashrc && \
    echo 'export LD_LIBRARY_PATH="$LINUXBREWHOME/lib64:$LINUXBREWHOME/lib:$LD_LIBRARY_PATH"' >> ~/.bashrc \
;fi

#####################################
# Minio:
#####################################
USER root
ARG INSTALL_MC=false
ENV INSTALL_MC ${INSTALL_MC}

COPY mc/config.json /root/.mc/config.json

RUN if [ ${INSTALL_MC} = true ]; then\
    curl -fsSL -o /usr/local/bin/mc https://dl.minio.io/client/mc/release/linux-amd64/mc && \
    chmod +x /usr/local/bin/mc \
;fi

#####################################
# Symfony:
#####################################
USER root
ARG INSTALL_SYMFONY=false
ENV INSTALL_SYMFONY ${INSTALL_SYMFONY}
RUN if [ ${INSTALL_SYMFONY} = true ]; then \

    mkdir -p /usr/local/bin \
    && curl -LsS https://symfony.com/installer -o /usr/local/bin/symfony \
    && chmod a+x /usr/local/bin/symfony \

    # Symfony 3 alias
    && echo 'alias dev="php bin/console -e=dev"' >> ~/.bashrc \
    && echo 'alias prod="php bin/console -e=prod"' >> ~/.bashrc \

    # Symfony 2 alias
    # && echo 'alias dev="php app/console -e=dev"' >> ~/.bashrc \
    # && echo 'alias prod="php app/console -e=prod"' >> ~/.bashrc \

;fi

USER laradock

#
#--------------------------------------------------------------------------
# Final Touch
#--------------------------------------------------------------------------
#

# Clean up
USER root
RUN apt-get clean && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# Set default work directory
WORKDIR /var/www
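Every optional section in this Dockerfile is gated on a build ARG (INSTALL_NODE, INSTALL_YARN, INSTALL_MONGO, PUID/PGID, and so on), so opting in is a matter of setting those args and rebuilding. A hedged sketch of a stand-alone build of this PHP 7.0 variant; the image tag is arbitrary and any ARG you omit keeps its default:

    # Illustrative only: build workspace/Dockerfile-70 directly, using the workspace/ folder as context
    docker build \
        --build-arg INSTALL_NODE=true \
        --build-arg NODE_VERSION=stable \
        --build-arg INSTALL_YARN=true \
        --build-arg PUID=$(id -u) --build-arg PGID=$(id -g) \
        -f workspace/Dockerfile-70 -t my/laradock-workspace-70 workspace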
@ -10,9 +10,10 @@
# https://hub.docker.com/r/laradock/workspace/tags/
#

FROM laradock/workspace:1.3 # placeholder. Need change after new image would be built.
# placeholder. Need change after new image would be built.
FROM laradock/workspace:1.3

MAINTAINER MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>
MAINTAINER Mahmoud Zalt <mahmoud@zalt.me>

#
#--------------------------------------------------------------------------