点击上方蓝字
关注大侠之运维
后台回复shell分享 获取200+脚本示例

1.Dos 攻击防范(自动屏蔽攻击 IP)
#!/bin/bash
# DoS defense: scan the last 5000 Nginx access-log lines for the current
# minute and DROP any client IP that issued more than 10 requests.
# NOTE(review): assumes the log's first field is the client IP — confirm
# against the configured log_format.
DATE=$(date +%d/%b/%Y:%H:%M)
LOG_FILE=/usr/local/nginx/logs/demo2.access.log
ABNORMAL_IP=$(tail -n5000 "$LOG_FILE" | grep "$DATE" | awk '{a[$1]++}END{for(i in a)if(a[i]>10)print i}')
for IP in $ABNORMAL_IP; do
    # Only insert a rule if this IP is not already blocked.
    if [ "$(iptables -vnL | grep -c "$IP")" -eq 0 ]; then
        iptables -I INPUT -s "$IP" -j DROP
        # Bug fix: the log path must be absolute (was "tmp/drop_ip.log").
        echo "$(date +'%F_%T') $IP" >> /tmp/drop_ip.log
    fi
done
2.Linux 系统发送告警脚本
# Install and configure mailx so scripts can send alert mail via 163.com SMTP.
# Bug fix: the config path must be absolute (was "etc/mail.rc").
# yum install mailx
# vi /etc/mail.rc
set from=baojingtongzhi@163.com smtp=smtp.163.com
set smtp-auth-user=baojingtongzhi@163.com smtp-auth-password=123456
set smtp-auth=login
3.MySQL 数据库备份单循环
#!/bin/bash
# Back up every non-system MySQL database into one .sql file per database.
DATE=$(date +%F_%H-%M-%S)
HOST=localhost
USER=backup
PASS=123.com
BACKUP_DIR=/data/db_backup
# Robustness: make sure the backup directory exists before dumping.
[ -d "$BACKUP_DIR" ] || mkdir -p "$BACKUP_DIR"
# Bug fix: anchor the exclusion patterns. The original used "mysql " with a
# trailing space, which never matched the "mysql" database line, so the
# system database was backed up despite the intent to skip it.
DB_LIST=$(mysql -h$HOST -u$USER -p$PASS -s -e "show databases;" 2>/dev/null | egrep -v '^(Database|information_schema|mysql|performance_schema|sys)$')
for DB in $DB_LIST; do
    BACKUP_NAME=$BACKUP_DIR/${DB}_${DATE}.sql
    if ! mysqldump -h$HOST -u$USER -p$PASS -B "$DB" > "$BACKUP_NAME" 2>/dev/null; then
        echo "$BACKUP_NAME 备份失败 !"
    fi
done
4.MySQL 数据库备份多循环
#!/bin/bash
# Back up every non-system MySQL database, one .sql file per table,
# under /data/db_backup/<db>_<timestamp>/.
DATE=$(date +%F_%H-%M-%S)
HOST=localhost
USER=backup
PASS=123.com
BACKUP_DIR=/data/db_backup
# Bug fix: anchor the exclusion patterns. The original's "mysql " (trailing
# space) never matched the "mysql" database line, so it was not skipped.
DB_LIST=$(mysql -h$HOST -u$USER -p$PASS -s -e "show databases;" 2>/dev/null | egrep -v '^(Database|information_schema|mysql|performance_schema|sys)$')
for DB in $DB_LIST; do
    BACKUP_DB_DIR=$BACKUP_DIR/${DB}_${DATE}
    [ ! -d "$BACKUP_DB_DIR" ] && mkdir -p "$BACKUP_DB_DIR" &>/dev/null
    TABLE_LIST=$(mysql -h$HOST -u$USER -p$PASS -s -e "use $DB;show tables;" 2>/dev/null)
    for TABLE in $TABLE_LIST; do
        BACKUP_NAME=$BACKUP_DB_DIR/${TABLE}.sql
        if ! mysqldump -h$HOST -u$USER -p$PASS "$DB" "$TABLE" > "$BACKUP_NAME" 2>/dev/null; then
            echo "$BACKUP_NAME 备份失败 !"
        fi
    done
done
5.Nginx 访问访问日志按天切割
#!/bin/bash
# Rotate Nginx access logs daily into a per-month directory, then send
# USR1 so Nginx reopens its log files.
LOG_DIR=/usr/local/nginx/logs
YESTERDAY_TIME=$(date -d "yesterday" +%F)
LOG_MONTH_DIR=$LOG_DIR/$(date +"%Y-%m")
LOG_FILE_LIST="default.access.log"
for LOG_FILE in $LOG_FILE_LIST; do
    [ ! -d "$LOG_MONTH_DIR" ] && mkdir -p "$LOG_MONTH_DIR"
    mv "$LOG_DIR/$LOG_FILE" "$LOG_MONTH_DIR/${LOG_FILE}_${YESTERDAY_TIME}"
done
# Bug fix: the pid file path must be absolute (was "var/run/nginx.pid").
kill -USR1 "$(cat /var/run/nginx.pid)"
6.Nginx 访问日志分析脚本
#!/bin/bash
# Nginx access-log analysis: top IPs, IPs within a time window, top pages,
# and per-page status-code counts.
# Log format: $remote_addr - $remote_user [$time_local] "$request" $status
#   $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for"
LOG_FILE=$1
# Robustness: fail early with usage instead of every awk erroring out.
[ -f "$LOG_FILE" ] || { echo "Usage: $0 <access_log>" >&2; exit 1; }
echo "统计访问最多的10个IP"
awk '{a[$1]++}END{print "UV:",length(a);for(v in a)print v,a[v]}' "$LOG_FILE" | sort -k2 -nr | head -10
echo "----------------------"
echo "统计时间段访问最多的IP"
# NOTE(review): [dd/Mon/yyyy:HH:MM:SS] compares lexically, not
# chronologically — these bounds are only reliable within one month.
awk '$4>="[01/Dec/2018:13:20:25" && $4<="[27/Nov/2018:16:20:49"{a[$1]++}END{for(v in a)print v,a[v]}' "$LOG_FILE" | sort -k2 -nr | head -10
echo "----------------------"
echo "统计访问最多的10个页面"
awk '{a[$7]++}END{print "PV:",length(a);for(v in a){if(a[v]>10)print v,a[v]}}' "$LOG_FILE" | sort -k2 -nr
echo "----------------------"
echo "统计访问页面状态码数量"
# Bug fix: the original omitted the log file here, leaving awk blocked on stdin.
awk '{a[$7" "$9]++}END{for(v in a){if(a[v]>5)print v,a[v]}}' "$LOG_FILE"
7.查看网卡实时流量脚本
#!/bin/bash
# Print in/out traffic (KB/s) for the NIC named in $1, sampled from
# /proc/net/dev ($2 = rx bytes, $10 = tx bytes on the interface line).
NIC=$1
echo -e " In ------ Out"
while true; do
    # Bug fix: paths must be absolute (were "proc/net/dev").
    OLD_IN=$(awk '$0~"'$NIC'"{print $2}' /proc/net/dev)
    OLD_OUT=$(awk '$0~"'$NIC'"{print $10}' /proc/net/dev)
    sleep 1
    NEW_IN=$(awk '$0~"'$NIC'"{print $2}' /proc/net/dev)
    NEW_OUT=$(awk '$0~"'$NIC'"{print $10}' /proc/net/dev)
    # Bug fix: the original divided in integer $(( )) arithmetic, so the
    # %.1f format always printed ".0" and rates under 1KB/s showed as 0.
    IN=$(awk -v b=$((NEW_IN - OLD_IN)) 'BEGIN{printf "%.1fKB/s", b/1024}')
    OUT=$(awk -v b=$((NEW_OUT - OLD_OUT)) 'BEGIN{printf "%.1fKB/s", b/1024}')
    echo "$IN $OUT"
    sleep 1
done
8.服务器系统配置初始化
#!/bin/bash
# Server initialization for RHEL/CentOS 6/7: timezone + NTP cron, disable
# SELinux and the firewall, shell-history timestamps, SSH hardening,
# open-file limits and kernel network tuning.
# Bug fixes vs. original: shebang was "#/bin/bash"; every /etc, /usr and
# /proc path below had lost its leading "/".

# Set timezone and sync time hourly via cron.
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime   # -f: don't fail if the link exists
if ! crontab -l | grep ntpdate &>/dev/null; then
    (echo "* 1 * * * ntpdate time.windows.com >/dev/null 2>&1"; crontab -l) | crontab
fi

# Disable SELinux (effective after reboot).
sed -i '/SELINUX/{s/permissive/disabled/}' /etc/selinux/config

# Stop the firewall (firewalld on EL7, the iptables service on EL6).
if egrep "7\.[0-9]" /etc/redhat-release &>/dev/null; then
    systemctl stop firewalld
    systemctl disable firewalld
elif egrep "6\.[0-9]" /etc/redhat-release &>/dev/null; then
    service iptables stop
    chkconfig iptables off
fi

# Record timestamp and user with each history entry.
if ! grep HISTTIMEFORMAT /etc/bashrc &>/dev/null; then
    echo 'export HISTTIMEFORMAT="%F %T `whoami` "' >> /etc/bashrc
fi

# Idle SSH/shell timeout of 10 minutes.
if ! grep "TMOUT=600" /etc/profile &>/dev/null; then
    echo "export TMOUT=600" >> /etc/profile
fi

# Forbid direct root SSH login.
sed -i 's/#PermitRootLogin yes/PermitRootLogin no/' /etc/ssh/sshd_config

# Stop cron from mailing job output to root.
sed -i 's/^MAILTO=root/MAILTO=""/' /etc/crontab

# Raise the max open-file limit.
if ! grep "* soft nofile 65535" /etc/security/limits.conf &>/dev/null; then
cat >> /etc/security/limits.conf << EOF
* soft nofile 65535
* hard nofile 65535
EOF
fi

# Kernel network tuning — guarded so repeated runs don't append duplicates
# (the original appended unconditionally every run).
if ! grep -q "tcp_max_tw_buckets" /etc/sysctl.conf; then
cat >> /etc/sysctl.conf << EOF
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_tw_buckets = 20480
net.ipv4.tcp_max_syn_backlog = 20480
net.core.netdev_max_backlog = 262144
net.ipv4.tcp_fin_timeout = 20
EOF
fi

# Minimize swap usage.
echo "0" > /proc/sys/vm/swappiness
9.监控 100 台服务器磁盘利用率脚本
#!/bin/bash
# Check disk usage on every host listed in host.info (columns: IP USER PORT,
# lines starting with '#' are comments) and warn when any /dev-backed
# partition is at 80% or more.
HOST_INFO=host.info
# Security/robustness fix: use an unpredictable temp file and clean it up
# (the original wrote to the fixed path /tmp/disk.tmp and never removed it).
TMP_FILE=$(mktemp /tmp/disk.XXXXXX)
trap 'rm -f "$TMP_FILE"' EXIT
for IP in $(awk '/^[^#]/{print $1}' "$HOST_INFO"); do
    USER=$(awk -v ip="$IP" 'ip==$1{print $2}' "$HOST_INFO")
    PORT=$(awk -v ip="$IP" 'ip==$1{print $3}' "$HOST_INFO")
    ssh -p "$PORT" "$USER@$IP" 'df -h' > "$TMP_FILE"
    # Produce "mountpoint=usage" pairs, e.g. "/=85".
    USE_RATE_LIST=$(awk 'BEGIN{OFS="="}/^\/dev/{print $NF,int($5)}' "$TMP_FILE")
    for USE_RATE in $USE_RATE_LIST; do
        PART_NAME=${USE_RATE%=*}
        USE_RATE=${USE_RATE#*=}
        if [ "$USE_RATE" -ge 80 ]; then
            echo "Warning: $PART_NAME Partition usage $USE_RATE%!"
        fi
    done
done
10.并发从数台机器中获取 hostname,并记录返回信息花费的时长,重定向到一个文件 hostname.txt 中,在全部完成后输出花费时长最短的那台机器的 CPU 信息。
#!/bin/bash
# Concurrently run `hostname` on every host, record how long each host took
# in hostname.txt, and after all finish, show CPU info (top) from the
# fastest host.
# Bug fix: the original shebang was the broken "#!bin/bash".

# All hosts, space separated (fill in real IPs).
ALL_HOSTS=(IP 地址 IP 地址)
# Robustness: start with a fresh file so timings from a previous run
# cannot skew the "fastest host" selection.
> hostname.txt
for host in ${ALL_HOSTS[*]}
do
{
    start_time=$(date +'%s')
    ssh "$host" "hostname" &>/dev/null
    sleep 2
    stop_time=$(date +'%s')
    time_consuming=$((stop_time - start_time))
    echo "$host: $time_consuming" >> hostname.txt
} &
done
wait
host=$(sort -n -k 2 hostname.txt | head -1 | awk -F':' '{print $1}')
ssh "$host" "top -b -n 1"
11.统计 /proc 目录下 Linux 进程相关数量信息,输出总进程数, running 进程数, stopped 进程数, sleeping 进程数, zombie 进程数。输出所有 zombie 的进程到 zombie.txt 并杀死所有 zombie 进程。
#!/bin/bash
# Count processes under /proc by state (running/stopped/sleeping/zombie),
# log zombie PIDs to zombie.txt and send them SIGKILL.
# NOTE(review): kill -9 cannot reap a true zombie — its parent must wait();
# kept only to match the original task statement.
# Bug fix: all /proc paths had lost their leading "/".
ALL_PROCESS=$(ls /proc/ | egrep '[0-9]+')
running_count=0
stoped_count=0
sleeping_count=0
zombie_count=0
for pid in ${ALL_PROCESS[*]}
do
    # Reset per iteration: if a process vanished between ls and here, the
    # original reused the previous iteration's state and miscounted.
    state=""
    test -f /proc/$pid/status && state=$(egrep "State" /proc/$pid/status | awk '{print $2}')
    case "$state" in
        R) running_count=$((running_count + 1)) ;;
        T) stoped_count=$((stoped_count + 1)) ;;
        S) sleeping_count=$((sleeping_count + 1)) ;;
        Z)
            zombie_count=$((zombie_count + 1))
            echo "$pid" >> zombie.txt
            kill -9 "$pid"
            ;;
    esac
done
echo -e "total:$((running_count+stoped_count+sleeping_count+zombie_count))\nrunning: $running_count\nstoped: $stoped_count\nsleeping: $sleeping_count\nzombie: $zombie_count"
12.把当前目录(包含子目录)下所有后缀为 ".sh" 的文件
后缀变更为 ".shell" ,之后删除每个文件的第二行。
#!/bin/bash
# Rename every *.sh under the current directory (recursively) to *.shell,
# then delete the second line of each renamed file.
ALL_SH_FILE=$(find . -type f -name "*.sh")
for file in ${ALL_SH_FILE[*]}
do
    # Bug fix: the original had the garbled "$(unknown)" here; strip the
    # .sh suffix with parameter expansion (awk -F'.sh' also misbehaved on
    # paths containing ".sh" mid-name).
    filename=${file%.sh}
    new_filename="${filename}.shell"
    mv "$file" "$new_filename"
    sed -i '2d' "$new_filename"
done
13.判断目录 /tmp/jstack 是否存在,不存在则新建一个目录,若存在则删除目录下所有内容。每隔 1 小时打印 inceptor server 的 jstack 信息,并以 jstack_${当前时间} 命名文件,每当目录下超过 10 个文件后,删除最旧的文件。
#!/bin/bash
# Ensure /tmp/jstack exists (emptied if it already did), then every hour
# dump jstack of the target java process into jstack_<timestamp>, keeping
# at most 10 files (oldest removed first).
DIRPATH='/tmp/jstack'
if [ ! -d "$DIRPATH" ]; then
    mkdir "$DIRPATH"
else
    rm -rf "$DIRPATH"/*
fi
cd "$DIRPATH" || exit 1   # never fall through and litter files elsewhere
while true
do
    sleep 3600
    # Replace 'inceptor' with your own java process name.
    pid=$(ps -ef | grep 'inceptor' | grep -v grep | awk '{print $2}')
    # Bug fix: the original computed the timestamp once before the loop,
    # so every hourly dump appended to the same file.
    CURRENT_TIME=$(date +'%F'-'%H:%M:%S')
    jstack $pid >> "jstack_${CURRENT_TIME}"
    dir_count=$(ls | wc -l)
    if [ "$dir_count" -gt 10 ]; then
        rm -f $(ls -tr | head -1)
    fi
done
14.从 test.log 中截取当天的所有 gc 信息日志,并统计 gc 时间的平均值和时长最长的时间。
#!/bin/bash
# From the GC log, take column 2 (a time like "0.12:"), strip the colon,
# then append the average and the maximum to capture_hive_log.log.
# NOTE(review): the task mentions test.log but the original reads
# hive-server2.log — confirm which file is intended.
awk '{print $2}' hive-server2.log | tr -d ':' | awk '{sum+=$1} END {print "avg: ", sum/NR}' >> capture_hive_log.log
# Bug fix: "max = 0" must run once in BEGIN — the original reset it on
# every record, so "Max" was effectively just the last line's value.
awk '{print $2}' hive-server2.log | tr -d ':' | awk 'BEGIN {max = 0} {if ($1+0 > max+0) max=$1} END {print "Max: ", max}' >> capture_hive_log.log
15.查找 80 端口请求数最高的前 20 个 IP 地址,判断中间 最小的请求数是否大于 500,如大于 500,则输出系统活动 情况报告到 alert.txt,如果没有,则在 600s 后重试,直 到有输出为止。
#!/bin/bash
# Take the top 20 IPs by request count on port 80; if the smallest of those
# 20 counts exceeds 500, write a system activity report to alert.txt and
# stop; otherwise retry after 600 seconds.
state="true"
while $state
do
    # Bug fixes vs. original: it matched ":22" although the task specifies
    # port 80, and "sort -n | head -20 | head -1" picked the GLOBAL minimum
    # rather than the minimum of the top 20 — take the 20 largest counts
    # descending, then the last (smallest) of them.
    # NOTE(review): with -F'[ :]+' the key $4 is the local address field;
    # confirm whether the remote address ($5) was intended for per-client
    # counting.
    SMALL_REQUESTS=$(netstat -ant | awk -F'[ :]+' '/:80/{count[$4]++} END {for(ip in count) print count[ip]}' | sort -nr | head -20 | tail -1)
    if [ "$SMALL_REQUESTS" -gt 500 ]; then
        sar -A > alert.txt
        state="false"
    else
        # Bug fix: the task requires a 600s retry interval (was "sleep 6").
        sleep 600
        continue
    fi
done
16.将当前目录下大于 10K 的文件转移到 /tmp 目录,再按 照文件大小顺序,从大到小输出文件名。
#!/bin/bash
# Move files larger than 10K from the current directory (recursively) into
# /tmp, then list them there from largest to smallest.
# Target directory.
DIRPATH='/tmp'
# Directory to scan.
FILEPATH='.'
# Fixes vs. original: "xargs -i" is deprecated (use -I {}), and without
# -print0/-0 filenames containing whitespace were mangled.
find "$FILEPATH" -size +10k -type f -print0 | xargs -0 -I {} mv {} "$DIRPATH"
ls -lS "$DIRPATH" | awk '{if(NR>1) print $NF}'

👆点击查看更多内容👆
推荐阅读
记得星标记一下,下次更容易找到我





