NOTE: I couldn’t get this procedure to work on Amazon Linux (2023) because podman wasn’t able to create the proper iptables rules (an issue with the backend it uses for creating them). But you can get Rocky Linux for free from the AWS Marketplace.
1/ Install podman (e.g., on Rocky 9 Linux)
sudo dnf install -y podman
sudo systemctl enable podman
sudo systemctl start podman
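A quick sanity check that podman is installed and working:
podman --version
sudo podman info | head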
2/ Pull the images from the docker.io registry
sudo podman pull selenium/hub:latest
sudo podman pull selenium/node-chrome:latest
# if you need firefox: sudo podman pull selenium/node-firefox:latest
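Verify that the pulled images are now in local storage:
sudo podman images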
3/ Install one or more web browsers
sudo yum install -y firefox
wget https://dl.google.com/linux/direct/google-chrome-stable_current_x86_64.rpm
sudo yum install -y ./google-chrome-stable_current_x86_64.rpm
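To confirm the browser installs, e.g.:
firefox --version
google-chrome --version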
4/ Create the network for the containers:
sudo podman network create selenium-grid
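Verify the network was created:
sudo podman network ls
sudo podman network inspect selenium-grid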
5/ Create containers based on the images (podman create only creates them; see the manual start after these commands)
sudo podman create --name selenium-hub -p 4444:4444 --network selenium-grid selenium/hub:latest
sudo podman create --name selenium-node1 -e SE_EVENT_BUS_HOST=selenium-hub -e SE_EVENT_BUS_PUBLISH_PORT=4442 -e SE_EVENT_BUS_SUBSCRIBE_PORT=4443 --network selenium-grid --shm-size=1g selenium/node-chrome:latest
sudo podman create --name selenium-node2 -e SE_EVENT_BUS_HOST=selenium-hub -e SE_EVENT_BUS_PUBLISH_PORT=4442 -e SE_EVENT_BUS_SUBSCRIBE_PORT=4443 --network selenium-grid --shm-size=1g selenium/node-chrome:latest
sudo podman create --name selenium-node3 -e SE_EVENT_BUS_HOST=selenium-hub -e SE_EVENT_BUS_PUBLISH_PORT=4442 -e SE_EVENT_BUS_SUBSCRIBE_PORT=4443 --network selenium-grid --shm-size=1g selenium/node-chrome:latest
sudo podman create --name selenium-node4 -e SE_EVENT_BUS_HOST=selenium-hub -e SE_EVENT_BUS_PUBLISH_PORT=4442 -e SE_EVENT_BUS_SUBSCRIBE_PORT=4443 --network selenium-grid --shm-size=1g selenium/node-chrome:latest
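At this point the containers exist but are not running; you can start them manually to verify the grid before wiring up systemd (they are stopped and removed again in step 8):
sudo podman start selenium-hub selenium-node1 selenium-node2 selenium-node3 selenium-node4
sudo podman ps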
6/ Install the xauth package if you want to display the output (browser) on your client when you run the scripts
sudo yum install -y xauth
7/ Create systemd service files for the hub and nodes (I originally generated the following using “podman generate systemd --new”)
sudo podman generate systemd --new selenium-hub | sudo tee /etc/systemd/system/selenium-hub.service
sudo podman generate systemd --new selenium-node1 | sudo tee /etc/systemd/system/selenium-node1.service
sudo podman generate systemd --new selenium-node2 | sudo tee /etc/systemd/system/selenium-node2.service
sudo podman generate systemd --new selenium-node3 | sudo tee /etc/systemd/system/selenium-node3.service
sudo podman generate systemd --new selenium-node4 | sudo tee /etc/systemd/system/selenium-node4.service
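The four node services can equivalently be generated in one loop:
for i in 1 2 3 4; do
  sudo podman generate systemd --new selenium-node$i | sudo tee /etc/systemd/system/selenium-node$i.service
done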
sudo systemctl daemon-reload
- Below is the sample content of the service files for selenium-hub and selenium-node1:
cat /etc/systemd/system/selenium-hub.service
[Unit]
Description=Podman container-selenium-hub.service
Documentation=man:podman-generate-systemd(1)
Wants=network-online.target
After=network-online.target
RequiresMountsFor=%t/containers
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
# Override the default max sessions (1 = # of host CPUs). Not needed here since I am using multiple worker nodes instead, so the default of 1 is OK:
# Environment=SE_NODE_MAX_SESSIONS=2
# Environment=SE_NODE_OVERRIDE_MAX_SESSIONS=true
TimeoutStopSec=70
ExecStart=/usr/bin/podman run \
--cidfile=%t/%n.ctr-id \
--cgroups=no-conmon \
--rm \
--sdnotify=conmon \
-d \
--replace \
--name selenium-hub \
-p 4444:4444 \
--network selenium-grid selenium/hub:latest
ExecStop=/usr/bin/podman stop \
--ignore -t 10 \
--cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm \
-f \
--ignore -t 10 \
--cidfile=%t/%n.ctr-id
Type=notify
NotifyAccess=all
[Install]
WantedBy=default.target
cat /etc/systemd/system/selenium-node1.service
[Unit]
Description=Podman container-selenium-node1.service
Documentation=man:podman-generate-systemd(1)
Wants=network-online.target
After=network-online.target
RequiresMountsFor=%t/containers
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
TimeoutStopSec=70
ExecStart=/usr/bin/podman run \
--cidfile=%t/%n.ctr-id \
--cgroups=no-conmon \
--rm \
--sdnotify=conmon \
-d \
--replace \
--name selenium-node1 \
-e SE_EVENT_BUS_HOST=selenium-hub \
-e SE_EVENT_BUS_PUBLISH_PORT=4442 \
-e SE_EVENT_BUS_SUBSCRIBE_PORT=4443 \
--network selenium-grid \
--shm-size=1g selenium/node-chrome:latest
ExecStop=/usr/bin/podman stop \
--ignore -t 10 \
--cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm \
-f \
--ignore -t 10 \
--cidfile=%t/%n.ctr-id
Type=notify
NotifyAccess=all
[Install]
WantedBy=default.target
8/ Stop (and remove) the running containers (if any) that you have created service files for
(“podman ps -a” lists all containers, “podman stop -a” stops all running containers, and “podman rm -a” removes all containers)
sudo systemctl daemon-reload
sudo podman ps -a
sudo podman stop selenium-hub
sudo podman stop selenium-node1
sudo podman stop selenium-node2
sudo podman stop selenium-node3
sudo podman stop selenium-node4
sudo podman rm selenium-hub
sudo podman rm selenium-node1
sudo podman rm selenium-node2
sudo podman rm selenium-node3
sudo podman rm selenium-node4
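Or the same stop-and-remove sequence in one loop:
for c in selenium-hub selenium-node1 selenium-node2 selenium-node3 selenium-node4; do
  sudo podman stop $c && sudo podman rm $c
done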
9/ Enable and start the systemd services for the hub and first two nodes (repeat for the remaining nodes; see the loop after these commands)
sudo systemctl daemon-reload
sudo systemctl enable selenium-hub.service
sudo systemctl start selenium-hub.service
sudo systemctl status selenium-hub.service
sudo systemctl enable selenium-node1.service
sudo systemctl start selenium-node1.service
sudo systemctl status selenium-node1.service
sudo systemctl enable selenium-node2.service
sudo systemctl start selenium-node2.service
sudo systemctl status selenium-node2.service
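The remaining node services are handled the same way; systemctl enable --now enables and starts in one step:
for s in selenium-node3 selenium-node4; do
  sudo systemctl enable --now $s.service
done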
10/ Launch a web browser on the host (running the hub and nodes) and connect to http://localhost:4444/ to access the hub
- Click on the camera/video-recorder icon; you will be prompted for the VNC password, which is “secret”
- You can watch the automation going on
11/ Use podman to see the hub and node containers running (I actually have 3 nodes running here, though only two node service files are shown above)
[root@rocky system]# podman ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
731d0ecd1467 docker.io/selenium/hub:latest /opt/bin/entry_po… 22 hours ago Up 22 hours 0.0.0.0:4444->4444/tcp, 4442-4443/tcp selenium-hub
193afdc255b1 docker.io/selenium/node-chrome:latest /opt/bin/entry_po… 15 minutes ago Up 15 minutes 5900/tcp, 9000/tcp selenium-node1
d60109ddb11f docker.io/selenium/node-chrome:latest /opt/bin/entry_po… 15 minutes ago Up 15 minutes 5900/tcp, 9000/tcp selenium-node2
011da3984d4b docker.io/selenium/node-chrome:latest /opt/bin/entry_po… 3 seconds ago Up 4 seconds 5900/tcp, 9000/tcp selenium-node3
[root@rocky system]#
12/ Submit your jobs, e.g., run your Python scripts, and you can observe the automation in the UI
NOTE: if you do a lot of debugging and abort scripts manually at the shell prompt, it takes a while
for selenium-hub to clear out the session. In the Selenium hub UI, go to “Sessions”, click the “i” under the Capabilities column for the aborted session, and click “DELETE”
in the session details pop-up screen. Another way is to restart the selenium-nodeX.service using systemctl (though the latter method is preferable).
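A stale session can also be killed from the shell through the hub’s standard WebDriver endpoint (a sketch; <session-id> is a placeholder for the id shown in the UI):
curl -X DELETE http://localhost:4444/session/<session-id>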
NOTE: by default, max sessions is set to one, meaning only one session runs on a node with one CPU (the underlying host).
It can be increased if you are sure the container performance can support it (especially if your host has more than one CPU, not just cores),
but it is easier to create a second node (container) instead.
With the default, sessions are queued and run one after another;
with a second node, the hub can schedule another session on the second node.
To raise the limit anyway, pass the following environment variables into the node container (they override the default max sessions of 1 = # of host CPUs) by adding these -e flags to the ExecStart podman run command in the selenium-nodeX.service file; note that a plain Environment= line in the [Service] section only sets the variable for the podman process itself, not inside the container:
-e SE_NODE_MAX_SESSIONS=2
-e SE_NODE_OVERRIDE_MAX_SESSIONS=true
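For example, a sketch of the selenium-node1 ExecStart from above with only the two flags added:
ExecStart=/usr/bin/podman run \
--cidfile=%t/%n.ctr-id \
--cgroups=no-conmon \
--rm \
--sdnotify=conmon \
-d \
--replace \
--name selenium-node1 \
-e SE_EVENT_BUS_HOST=selenium-hub \
-e SE_EVENT_BUS_PUBLISH_PORT=4442 \
-e SE_EVENT_BUS_SUBSCRIBE_PORT=4443 \
-e SE_NODE_MAX_SESSIONS=2 \
-e SE_NODE_OVERRIDE_MAX_SESSIONS=true \
--network selenium-grid \
--shm-size=1g selenium/node-chrome:latest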
Some other parameters that can go in a Quadlet .container unit file for a container (see the sketch after this list):
AutoUpdate=registry
PublishPort=4444:4444
Volume=/dev/shm:/dev/shm
AddCapability=AUDIT_WRITE NET_RAW
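As a sketch, on podman versions with Quadlet support (4.4+) the hub could be defined declaratively in /etc/containers/systemd/selenium-hub.container instead of using podman generate systemd; the file name and exact layout here are assumptions, with keys per podman-systemd.unit(5):
[Unit]
Description=Selenium hub container
Wants=network-online.target
After=network-online.target

[Container]
Image=docker.io/selenium/hub:latest
ContainerName=selenium-hub
PublishPort=4444:4444
Network=selenium-grid
AutoUpdate=registry

[Install]
WantedBy=default.target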
- Other useful commands, e.g.:
sudo podman stop <container>
sudo podman ps -a
sudo podman rm <container>
sudo podman stats
Using a Selenium (podman) container manually: details on what actually happens behind the scenes when you use the systemd services above
- To utilize a Selenium container image for a script, follow these steps:
- Install Docker or Podman:
sudo dnf install -y podman
sudo systemctl start podman
sudo systemctl enable podman
- Pull the Selenium image (download the desired Selenium standalone image from Docker Hub). For instance, to use Chrome:
sudo podman pull selenium/standalone-chrome
- Run the Selenium container: start the container, exposing the necessary port for communication with the Selenium server. For example, to run Chrome:
sudo podman run --name selechrome --cap-add=AUDIT_WRITE --cap-add=NET_RAW -d -p 4444:4444 -v /dev/shm:/dev/shm selenium/standalone-chrome
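Once the container is up, you can confirm the Selenium server is ready before pointing scripts at it:
curl -s http://localhost:4444/wd/hub/status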
- Create a Python virtual environment and install Selenium in it:
sudo yum install -y python3.12 (latest as of 8/31/2025; the latest Selenium needs something newer than 3.9.x, which may be the distro default)
python3.12 -m venv selenium_env
[aitayemi@rocky ~]$ source selenium_env/bin/activate
((selenium_env) ) [aitayemi@rocky ~]$ pip install --upgrade pip
((selenium_env) ) [aitayemi@rocky ~]$ pip install selenium==4.35.0
((selenium_env) ) [aitayemi@rocky ~]$ deactivate
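To confirm Selenium is importable from the venv without activating it:
~/selenium_env/bin/python3 -c "import selenium; print(selenium.__version__)"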
- Configure the Selenium WebDriver in your script: configure the WebDriver to connect to the remote Selenium server running in the container. The URL will typically be http://localhost:4444/wd/hub (or the IP address of the container host if running remotely, and the mapped port).
sample script:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
driver = webdriver.Remote(
command_executor='http://localhost:4444/wd/hub',
options=chrome_options
)
driver.get("http://www.google.com")
print(driver.title)
driver.quit()
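To try the sample against the container, save it (e.g., as sample.py, a name assumed here) and run it with the venv interpreter:
~/selenium_env/bin/python3 sample.py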
- In MY OWN psp.py script, I replaced the initializeBrowser() call with code that creates the browser driver object using the running selenium container:
# Initialize browser object
URL = "https://www.acme.com/sweepstakes/all-acme-sweeps"
# old call, now replaced: driver = _acme_psp.initializeBrowser(URL)
chrome_options = Options()
driver = webdriver.Remote(
    command_executor='http://localhost:4444/wd/hub',
    options=chrome_options
)
- To see what is going on inside the container, launch a browser locally and go to the Grid UI at http://localhost:4444/ (the WebDriver endpoint itself is http://localhost:4444/wd/hub)
- Click on the camera/video-recorder icon; you will be prompted for the VNC password, which is “secret”
- You can watch the automation going on
- NO more cron scheduling issues where I have to ensure no other chrome/firefox sessions are running! I don’t get the “--user-data-dir in use” error any more!!
NOTE: max sessions is auto-set to the # of CPUs on the host (not cores; for example, I only have one CPU on my Linux laptop/host). Overriding it to 2 means I can have two concurrent Chrome browser sessions (i.e., 2 python scripts initiating chrome sessions).
- Run the Selenium Grid (Chrome) container for ACME Super Prize (to optionally limit the ram disk, add --shm-size=512m):
sudo podman run --name acme_psp --cap-add=AUDIT_WRITE --cap-add=NET_RAW -d -p 4444:4444 -v /dev/shm:/dev/shm -e SE_NODE_MAX_SESSIONS=2 -e SE_NODE_OVERRIDE_MAX_SESSIONS=true selenium/standalone-chrome
- To make it easier to run the script than typing /path/to/python/venv/python3 every time, make the script file executable and set the first line of the script to invoke the python executable in the virtual environment, i.e.,
#!/home/aitayemi/selenium_env/bin/python3
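Then make the script executable and invoke it directly (assuming it lives in the home directory):
chmod +x ~/psp.py
~/psp.py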
NOTE: if you install podman on a system such as Amazon Linux that doesn’t have the package by default, you need to create the directory /etc/containers/ and create two files in it: registries.conf and policy.json. RedHat variants such as Rocky Linux already come with the directory and the two files in it.
[root@rocky ~]# cat /etc/containers/registries.conf
unqualified-search-registries = ["registry.access.redhat.com", "registry.redhat.io", "docker.io"]
[root@rocky ~]# cat /etc/containers/policy.json
{
  "default": [
    {
      "type": "insecureAcceptAnything"
    }
  ],
  "transports": {
    "docker": {
      "registry.access.redhat.com": [
        {
          "type": "signedBy",
          "keyType": "GPGKeys",
          "keyPaths": ["/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release", "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta"]
        }
      ],
      "registry.redhat.io": [
        {
          "type": "signedBy",
          "keyType": "GPGKeys",
          "keyPaths": ["/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release", "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta"]
        }
      ]
    },
    "docker-daemon": {
      "": [
        {
          "type": "insecureAcceptAnything"
        }
      ]
    }
  }
}
[root@rocky ~]#