Published November 4, 2025 | Version v1
Dataset · Open Access

Simulation and dataset-application code for the distribution "The Half-Logistic Generalized Power Lindley Distribution: Theory and Applications"

  • Bitlis Eren University

Description

Simulation and dataset-application code for “The Half-Logistic Generalized Power Lindley Distribution: Theory and Applications” is provided below:

## Required packages; goftest provides ad.test(), while ggplot2 and stats4
## are loaded but not strictly needed below (base graphics are used)
library(ggplot2)
library(goftest)
library(stats4)

data <- c(
  0.014, 0.034, 0.059, 0.061, 0.069, 0.080, 0.123, 0.142, 0.165, 0.210,
  0.381, 0.464, 0.479, 0.556, 0.574, 0.839, 0.917, 0.969, 0.991, 1.064,
  1.088, 1.091, 1.174, 1.270, 1.275, 1.355, 1.397, 1.477, 1.578, 1.649,
  1.702, 1.893, 1.932, 2.001, 2.161, 2.292, 2.326, 2.337, 2.628, 2.785,
  2.811, 2.886, 2.993, 3.122, 3.248, 3.715, 3.790, 3.857, 3.912, 4.100,
  4.106, 4.116, 4.315, 4.510, 4.580, 5.267, 5.299, 5.583, 6.065, 9.701
)
stopifnot(all(is.finite(data)), all(data > 0))

## ---- Model: CDF and PDF (Lindley) ----
## F(x;θ) = 1 - (1 + θ x/(θ+1)) exp(-θ x)
## f(x;θ) = (θ^2/(θ+1)) (1+x) exp(-θ x)
F_lindley <- function(x, theta){
  if(!is.finite(theta) || theta <= 0) return(rep(NA_real_, length(x)))
  x <- pmax(x, .Machine$double.xmin)
  1 - (1 + theta*x/(theta+1)) * exp(-theta*x)
}
logpdf_lindley <- function(x, theta){
  if(!is.finite(theta) || theta <= 0) return(rep(-Inf, length(x)))
  if(any(!is.finite(x)) || any(x <= 0)) return(rep(-Inf, length(x)))
  x <- pmax(x, .Machine$double.xmin)
  2*log(theta) - log(theta+1) + log1p(x) - theta*x
}
f_lindley <- function(x, theta) exp(logpdf_lindley(x, theta))
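## Optional sanity check (a sketch, not part of the original script): the PDF
## should integrate numerically to the CDF; theta = 2 and the 1e-6 tolerance
## are arbitrary choices.
chk_q   <- c(0.5, 1, 2, 5)
chk_int <- sapply(chk_q, function(q) integrate(f_lindley, 0, q, theta = 2)$value)
stopifnot(all(abs(chk_int - F_lindley(chk_q, 2)) < 1e-6))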

## ---- Negative log-likelihood on the log-parameter scale ----
## param = a with theta = exp(a)
nll <- function(a){
  theta <- exp(a)
  llvec <- logpdf_lindley(data, theta)
  if(any(!is.finite(llvec))) return(1e300)
  -sum(llvec)
}

## ---- MLE via optim (BFGS with Hessian) ----
fit <- optim(log(1), nll, method = "BFGS", hessian = TRUE)
if(fit$convergence != 0) warning("optim did not fully converge; consider different starts.")

theta_hat <- exp(fit$par[1])

H <- fit$hessian            # 1x1
Vlog <- tryCatch(1/H[1,1], error=function(e) NA_real_)
se_theta <- if(is.finite(Vlog) && Vlog > 0) sqrt(Vlog) * theta_hat else NA_real_

logL <- -fit$value
n <- length(data); k <- 1
AIC  <- -2*logL + 2*k
BIC  <- -2*logL + log(n)*k
HQIC <- -2*logL + 2*k*log(log(n))
CAIC <- -2*logL + k*(log(n) + 1)

## Goodness of fit (note: the parameters are estimated from the data, so the
## KS and AD p-values below are approximate)
ks_res <- ks.test(data, function(x) F_lindley(x, theta_hat))
ad_res <- ad.test(data, null = F_lindley, theta = theta_hat)

cat("=== Estimated Parameters (Lindley) ===\n")
cat(sprintf("theta_hat = %.6f  (SE = %s)\n",
            theta_hat, ifelse(is.finite(se_theta), sprintf("%.6f", se_theta), "NA")))
cat("\n=== Information Criteria ===\n")
cat(sprintf("Log-Likelihood: %.6f\n", logL))
cat(sprintf("AIC  : %.6f\n", AIC))
cat(sprintf("BIC  : %.6f\n", BIC))
cat(sprintf("HQIC : %.6f\n", HQIC))
cat(sprintf("CAIC : %.6f\n", CAIC))
cat("\n=== KS Test (vs fitted CDF) ===\n")
cat(sprintf("KS Statistic: %.6f\np-value     : %.6f\n", ks_res$statistic, ks_res$p.value))
cat("\n=== Anderson–Darling Test (goftest::ad.test) ===\n")
cat(sprintf("AD Statistic: %.6f\np-value     : %.6f\n", ad_res$statistic, ad_res$p.value))

op <- par(mfrow = c(1, 2))

hist(data, probability = TRUE, breaks = 30,
     main = "Lindley PDF",
     col = "lightgray", xlab = "Data", border = "white")
curve(f_lindley(x, theta_hat), add = TRUE, lwd = 2, col = "blue")

plot(ecdf(data), main = "Lindley CDF",
     xlab = "Data", ylab = "Cumulative Probability")
curve(F_lindley(x, theta_hat), add = TRUE, lwd = 2, col = "red")
legend("bottomright", legend = c("Empirical CDF", "Fitted CDF"),
       col = c("black", "red"), lwd = 2, bty = "n")

par(op)
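## Optional cross-check of the Hessian-based SE via a parametric bootstrap
## (a sketch, not part of the original analysis). B = 200 is an arbitrary
## bootstrap size; the sampler uses the standard mixture representation
## Lindley(theta) = theta/(theta+1) * Exp(theta) + 1/(theta+1) * Gamma(2, theta).
r_lindley <- function(n, theta){
  mix <- rbinom(n, 1, theta/(theta+1))
  ifelse(mix == 1, rexp(n, rate = theta), rgamma(n, shape = 2, rate = theta))
}
set.seed(1)
boot_theta <- replicate(200, {
  xb <- r_lindley(length(data), theta_hat)
  exp(optim(log(theta_hat), function(a) -sum(logpdf_lindley(xb, exp(a))),
            method = "BFGS")$par)
})
cat(sprintf("Bootstrap SE(theta): %.6f (Hessian-based: %.6f)\n",
            sd(boot_theta), se_theta))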

***************************************

## ---- HLGPL model: CDF and PDF ----
## G(x;θ,β) = [1 - A] / [1 + A],  A = (1 + θ x^β/(θ+1)) * exp(-θ x^β)
G_cdf <- function(x, theta, beta){
  z <- theta * x^beta
  A <- (1 + z/(theta+1)) * exp(-z)
  (1 - A) / (1 + A)
}

logpdf_g <- function(x, theta, beta){
  z    <- theta * x^beta
  log_num <- log(2) + 2*log(theta) + log(beta) +
             log1p(x^beta) - log(theta+1) + (beta-1)*log(x) - z
  A <- (1 + z/(theta+1)) * exp(-z)
  log_den <- 2*log1p(A)                 # log( (1+A)^2 )
  log_num - log_den
}
g_pdf <- function(x, theta, beta){ exp(logpdf_g(x, theta, beta)) }
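## Optional sanity check (a sketch): g_pdf should integrate to ~1 and G_cdf
## should be nondecreasing; (theta, beta) = (1.5, 1.2) and the 1e-4 tolerance
## are arbitrary choices (beta > 1 keeps the density bounded at 0).
stopifnot(
  abs(integrate(g_pdf, 0, Inf, theta = 1.5, beta = 1.2)$value - 1) < 1e-4,
  all(diff(G_cdf(seq(0.01, 10, by = 0.01), 1.5, 1.2)) >= 0)
)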
## ---- Inverse-CDF sampling via the Lambert W function ----
# G(x) = (1 - A)/(1 + A)  ⇒  A = (1 - u)/(1 + u)
# A = ((θ+1)+t)/(θ+1) * e^{-t},  t = θ x^β
# ((θ+1)+t) e^{-t} = A(θ+1)  ⇒  with s = t+(θ+1):  s e^{-s} = A(θ+1) e^{-(θ+1)}
# s e^{-s} = c  ⇒  -s = W(-c); the real solution requires the W_{-1} branch.

lambertW_m1 <- function(z, tol=1e-12, maxit=50){
  # z in (-1/e, 0); returns W_{-1}(z) <= -1 via Halley iteration
  if (any(z >= 0 | z <= -1/exp(1))) stop("lambertW_m1: z must lie in (-1/e, 0).")
  w <- log(-z) - log(-log(-z))   # good starting point (for z near 0-)
  for(it in 1:maxit){
    ew <- exp(w); f <- w*ew - z
    w1 <- w + 1
    denom <- ew*w1 - (w + 2)*f/(2*w1)
    dw <- f/denom
    w <- w - dw
    if (max(abs(dw)) < tol) break
  }
  w
}
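## Verification sketch (not in the original script): any returned value must
## satisfy w * exp(w) = z and lie on the W_{-1} branch (w <= -1).
z_test <- -exp(-seq(1.5, 10, by = 0.5))          # values inside (-1/e, 0)
w_test <- lambertW_m1(z_test)
stopifnot(all(abs(w_test * exp(w_test) - z_test) < 1e-10), all(w_test <= -1))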

rg_thlgpl <- function(n, theta, beta){
  u <- runif(n)
  u <- pmin(pmax(u, 1e-12), 1-1e-12)        # guard against u = 0 or 1
  A <- (1 - u)/(1 + u)
  cc <- A * (theta+1) * exp(-(theta+1))     # s e^{-s} = cc (avoids masking base::c)
  z <- -cc
  # W_{-1} branch:
  w <- lambertW_m1(z)
  s <- -w
  t <- s - (theta+1)
  y <- t / theta          # y = x^β
  y^(1/beta)
}
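## Optional check of the sampler (a sketch): simulated draws should match the
## model CDF; n = 2000 and (theta, beta) = (1.5, 0.9) are arbitrary choices.
set.seed(99)
x_sim <- rg_thlgpl(2000, theta = 1.5, beta = 0.9)
print(ks.test(x_sim, function(x) G_cdf(x, 1.5, 0.9)))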


nll_g <- function(par, dat){
  a <- par[1]; b <- par[2]              # log-scale parameters
  theta <- exp(a); beta <- exp(b)
  ll <- sum(logpdf_g(dat, theta, beta))
  if(!is.finite(ll)) ll <- -1e300
  -ll
}

mle_once <- function(x, init=log(c(theta=1, beta=1))){
  fit <- optim(init, nll_g, dat=x, method="L-BFGS-B",
               control=list(maxit=200))
  a <- fit$par[1]; b <- fit$par[2]
  th_hat <- exp(a); be_hat <- exp(b)
  H <- tryCatch(optimHess(fit$par, nll_g, dat=x),
                error=function(e) matrix(NA_real_, 2, 2))
  V <- tryCatch(solve(H), error=function(e) matrix(NA_real_, 2, 2))
  se <- rep(NA_real_, 2)
  if(all(is.finite(diag(V))) && all(diag(V) > 0)){
    se_log <- sqrt(diag(V))
    se <- se_log * c(th_hat, be_hat)   # delta method
  }
  list(est=c(theta=th_hat, beta=be_hat), se=c(theta=se[1], beta=se[2]))
}
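## Usage sketch (not in the original script): on one simulated sample the
## estimator should roughly recover the generating parameters.
set.seed(42)
print(mle_once(rg_thlgpl(500, theta = 1.5, beta = 0.9)))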

make_table_MLE <- function(theta, beta, n_vals=c(100,200,300,400), R=40,
                           init=log(c(theta=1, beta=1))){
  set.seed(123)
  labs <- c("θ","β"); out <- list(); true <- c(theta, beta)
  for(n in n_vals){
    ests <- low <- up <- matrix(NA_real_, R, 2)
    for(r in 1:R){
      x <- rg_thlgpl(n, theta, beta)
      m <- mle_once(x, init=init)
      ests[r,] <- m$est
      low[r,]  <- m$est - 1.96*m$se
      up[r,]   <- m$est + 1.96*m$se
    }
    est_bar <- colMeans(ests, na.rm=TRUE)
    bias    <- est_bar - true
    mse     <- colMeans((ests - matrix(true, R, 2, byrow=TRUE))^2, na.rm=TRUE)
    low_m   <- colMeans(low, na.rm=TRUE)
    up_m    <- colMeans(up,  na.rm=TRUE)
    len_m   <- up_m - low_m
    out[[length(out)+1]] <- data.frame(
      n=n, Parameter=labs,
      ML_Estimate=est_bar, Bias=bias, MSE=mse,
      Lower_Limit=low_m, Upper_Limit=up_m, Length=len_m,
      row.names=NULL
    )
  }
  do.call(rbind, out)
}

## Componentwise random-walk Metropolis (Metropolis-within-Gibbs) on the log scale
gibbs_once <- function(x, niter=3000, burn=1000, thin=10,
                       start=log(c(theta=1, beta=1)),
                       prior=list(kt=1, lt=0.1, kb=1, lb=0.1), # Gamma(shape, rate) priors
                       prop_sd=c(0.25, 0.25)){
  a <- start[1]; b <- start[2]
  keep <- seq(burn+1, niter, by=thin)
  out  <- matrix(NA_real_, length(keep), 2); k <- 0
  logpost <- function(a,b){
    theta <- exp(a); beta <- exp(b)
    lp <- sum(logpdf_g(x, theta, beta))
    # Gamma log-priors plus the log-Jacobian of the log-parameterization;
    # together these simplify to kt*a - lt*theta + kb*b - lb*beta
    lp <- lp + with(prior, kt*a - lt*theta + kb*b - lb*beta)
    if(!is.finite(lp)) lp <- -1e300
    lp
  }
  lcur <- logpost(a,b)
  for(it in 1:niter){
    ap <- rnorm(1,a,prop_sd[1]); lp <- logpost(ap,b); if(log(runif(1)) < lp - lcur){ a<-ap; lcur<-lp }
    bp <- rnorm(1,b,prop_sd[2]); lp <- logpost(a,bp); if(log(runif(1)) < lp - lcur){ b<-bp; lcur<-lp }
    if(it %in% keep){ k <- k+1; out[k,] <- c(exp(a),exp(b)) }
  }
  colnames(out) <- c("theta","beta")
  list(mean=colMeans(out),
       q=apply(out, 2, quantile, probs=c(.025,.5,.975)))
}
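## Usage sketch: a single chain on simulated data; the posterior means should
## land near the generating values. Settings here are illustrative only.
set.seed(11)
print(gibbs_once(rg_thlgpl(300, theta = 1.5, beta = 0.9),
                 niter = 3000, burn = 1000, thin = 10))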

make_table_GIBBS <- function(theta, beta,
                             n_vals=c(150,200,300,400), R=30,
                             niter=3000, burn=1000, thin=10){
  set.seed(456)
  labs <- c("θ","β"); out <- list(); true <- c(theta, beta)
  for(n in n_vals){
    ests <- low <- up <- matrix(NA_real_, R, 2)
    for(r in 1:R){
      x  <- rg_thlgpl(n, theta, beta)
      bz <- gibbs_once(x, niter=niter, burn=burn, thin=thin)
      ests[r,] <- bz$mean
      low[r,]  <- bz$q[1,]; up[r,] <- bz$q[3,]
    }
    est_bar <- colMeans(ests, na.rm=TRUE)
    bias    <- est_bar - true
    mse     <- colMeans((ests - matrix(true, R, 2, byrow=TRUE))^2, na.rm=TRUE)
    low_m   <- colMeans(low, na.rm=TRUE)
    up_m    <- colMeans(up,  na.rm=TRUE)
    len_m   <- up_m - low_m
    out[[length(out)+1]] <- data.frame(
      n=n, Parameter=labs,
      Gibbs_Estimate=est_bar, Bias=bias, MSE=mse,
      Lower_Limit=low_m, Upper_Limit=up_m, Length=len_m,
      row.names=NULL
    )
  }
  do.call(rbind, out)
}
set.seed(7)

tab_gibbs <- make_table_GIBBS(theta=1.5, beta=0.9,
                              n_vals=c(100,200,300,400), R=10,
                              niter=3000, burn=1000, thin=10)

tab_mle   <- make_table_MLE(theta=1.5, beta=0.9,
                            n_vals=c(100,200,300,400), R=10,
                            init=log(c(theta=1.2, beta=0.8)))  # good starting values

print(tab_gibbs)
print(tab_mle)

****************************

## MLE & Bayes (RW-MH)
make_summary_table <- function(estimates, true_vals, lows, ups, method_name){
  # estimates: R x p (R replications, p parameters)
  # lows, ups: R x p (lower/upper CI bounds)
  p <- length(true_vals)
  est_bar <- colMeans(estimates, na.rm=TRUE)
  bias    <- est_bar - true_vals
  mse     <- colMeans((estimates - matrix(true_vals, nrow(estimates), p, byrow=TRUE))^2, na.rm=TRUE)
  low_m   <- colMeans(lows, na.rm=TRUE)
  up_m    <- colMeans(ups, na.rm=TRUE)
  len_m   <- up_m - low_m
  data.frame(
    Parameter = colnames(estimates),
    Estimate  = est_bar,
    Bias      = bias,
    MSE       = mse,
    Lower     = low_m,
    Upper     = up_m,
    Length    = len_m,
    Method    = method_name,
    row.names = NULL
  )
}

make_table_MLE_fmt <- function(theta, beta, n_vals=c(100,200,300,400), R=50){
  true <- c(theta, beta)
  labs <- c("θ","β")
  out <- list()
  for(n in n_vals){
    ests <- lows <- ups <- matrix(NA_real_, R, 2, dimnames=list(NULL,labs))
    for(r in 1:R){
      x <- rg_thlgpl(n, theta, beta)
      fit <- mle_once(x)
      ests[r,] <- fit$est
      if(all(is.finite(fit$se))){
        lows[r,] <- fit$est - 1.96*fit$se
        ups[r,]  <- fit$est + 1.96*fit$se
      }
    }
    tab <- make_summary_table(ests, true, lows, ups, "MLE")
    tab$n <- n
    out[[length(out)+1]] <- tab
  }
  do.call(rbind, out)
}

make_table_BAYES_fmt <- function(theta, beta, n_vals=c(100,200,300,400), R=50,
                                 niter=3000, burn=1000, thin=10){
  true <- c(theta, beta)
  labs <- c("θ","β")
  out <- list()
  for(n in n_vals){
    ests <- lows <- ups <- matrix(NA_real_, R, 2, dimnames=list(NULL,labs))
    for(r in 1:R){
      x  <- rg_thlgpl(n, theta, beta)
      bz <- gibbs_once(x, niter=niter, burn=burn, thin=thin)
      ests[r,] <- bz$mean
      lows[r,] <- bz$q[1,]
      ups[r,]  <- bz$q[3,]
    }
    tab <- make_summary_table(ests, true, lows, ups, "Bayes")
    tab$n <- n
    out[[length(out)+1]] <- tab
  }
  do.call(rbind, out)
}


set.seed(77)
tab_mle   <- make_table_MLE_fmt(theta=4.5, beta=0.8,
                                n_vals=c(100,200,300,400), R=30)
tab_bayes <- make_table_BAYES_fmt(theta=4.5, beta=0.8,
                                  n_vals=c(100,200,300,400), R=30)

print(tab_mle)
print(tab_bayes)

********************


## ---- HL–G Power Lindley: MLE + simple Bayesian fit ----
data <- c(
  0.1405, 0.1566, 0.1577, 0.1604, 0.1608, 0.2215, 0.2994, 0.3131, 0.3246, 0.3247,
  0.3295, 0.3300, 0.3379, 0.3397, 0.3523, 0.3589, 0.3933, 0.4176, 0.4258, 0.4356,
  0.4421, 0.4444, 0.4505, 0.4558, 0.4683, 0.4733, 0.4846, 0.4889, 0.5096, 0.5177,
  0.5278, 0.5347, 0.5433, 0.5442, 0.5508, 0.5527, 0.5606, 0.5607, 0.5671, 0.5753,
  0.5828, 0.6030, 0.6050, 0.6136, 0.6261, 0.6395, 0.6469, 0.6512, 0.6816, 0.6994,
  0.7048, 0.7292, 0.7430, 0.7455, 0.7798, 0.7984, 0.8147, 0.8230, 0.8302, 0.8342,
  0.9794
)
stopifnot(all(is.finite(data)), all(data > 0))
## G(x;θ,β)= (1 - A)/(1 + A),  A = (1 + θ x^β/(θ+1)) exp(-θ x^β)
G_cdf <- function(x, theta, beta){
  if (!is.finite(theta) || !is.finite(beta) || theta <= 0 || beta <= 0)
    return(rep(NA_real_, length(x)))
  x <- pmax(x, .Machine$double.xmin)
  z <- theta * x^beta
  A <- (1 + z/(theta+1)) * exp(-z)
  (1 - A)/(1 + A)
}

## log g(x;θ,β) = log( 2 * θ^2 * β/(θ+1) * x^{β-1} * (1+x^β) * e^{-θ x^β} ) - 2*log(1+A)
logpdf_HLPL <- function(x, theta, beta){
  if (!is.finite(theta) || !is.finite(beta) || theta <= 0 || beta <= 0)
    return(rep(-Inf, length(x)))
  if (any(!is.finite(x)) || any(x <= 0)) return(rep(-Inf, length(x)))
  x <- pmax(x, .Machine$double.xmin)
  z <- theta * x^beta
  log_num <- log(2) + 2*log(theta) + log(beta) - log(theta+1) +
    (beta-1)*log(x) + log1p(pmin(x^beta, 1e300)) - z
  A <- (1 + z/(theta+1)) * exp(-z)
  log_den <- 2*log1p(pmin(A, 1e300))
  log_num - log_den
}
g_pdf <- function(x, theta, beta) exp(logpdf_HLPL(x, theta, beta))

nll <- function(par_log){
  th <- exp(par_log[1]); be <- exp(par_log[2])
  lv <- logpdf_HLPL(data, th, be)
  if (any(!is.finite(lv))) return(1e300)
  -sum(lv)
}
start_log <- log(c(theta=1, beta=1))
fit_mle <- optim(start_log, nll, method="BFGS", hessian=TRUE)
if(fit_mle$convergence != 0) warning("optim did not fully converge; consider different starts.")

theta_hat <- exp(fit_mle$par[1]); beta_hat <- exp(fit_mle$par[2])

H <- fit_mle$hessian
V <- tryCatch(solve(H), error=function(e) matrix(NA_real_, 2, 2))
se_theta <- se_beta <- NA_real_
if (all(is.finite(V)) && all(diag(V) > 0)){
  se_log <- sqrt(diag(V))
  se_theta <- se_log[1] * theta_hat
  se_beta  <- se_log[2] * beta_hat
}

logL <- -fit_mle$value
n <- length(data); k <- 2
AIC  <- -2*logL + 2*k
BIC  <- -2*logL + log(n)*k
HQIC <- -2*logL + 2*k*log(log(n))
CAIC <- -2*logL + k*(log(n) + 1)

ks_res <- ks.test(data, function(xx) G_cdf(xx, theta_hat, beta_hat))

## Anderson–Darling test: use goftest if available; otherwise compute the A^2
## statistic directly (no valid p-value then, since the parameters were
## estimated from the data)
has_goftest <- requireNamespace("goftest", quietly = TRUE)
ad_stat <- NA_real_; ad_p <- NA_real_
if (isTRUE(has_goftest)) {
  adr <- goftest::ad.test(data, null=G_cdf, theta=theta_hat, beta=beta_hat)
  ad_stat <- as.numeric(adr$statistic); ad_p <- as.numeric(adr$p.value)
} else {
  u  <- sort(pmin(pmax(G_cdf(data, theta_hat, beta_hat), 1e-12), 1-1e-12))
  nn <- length(u); i <- seq_len(nn)
  ad_stat <- -nn - (1/nn) * sum((2*i - 1) * (log(u) + log(1 - rev(u))))
  ad_p <- NA_real_
}

bayes_hlgpl <- function(x,
                        niter=15000, burn=5000, thin=10,
                        start=c(theta=1, beta=1),
                        prior=list(kt=1, lt=0.1, kb=1, lb=0.1), # Gamma(shape, rate)
                        prop_sd=c(0.25, 0.25), seed=123){

  set.seed(seed)
  logpost <- function(a,b){
    th <- exp(a); be <- exp(b)
    lp <- sum(logpdf_HLPL(x, th, be))
    if(!is.finite(lp)) return(-Inf)
    with(prior, lp + (kt-1)*a - lt*th + (kb-1)*b - lb*be + (a+b)) # + Jacobian
  }

  a <- log(start["theta"]); b <- log(start["beta"])
  cur <- logpost(a,b)
  if(!is.finite(cur)){
    a <- log(1); b <- log(1); cur <- logpost(a,b)
    if(!is.finite(cur)) stop("Start/prior invalid.")
  }

  keep <- seq(burn+1, niter, by=thin)
  out  <- matrix(NA_real_, nrow=length(keep), ncol=2, dimnames=list(NULL,c("theta","beta")))
  kacc_a <- 0L; kacc_b <- 0L; jj <- 0L

  for(it in 1:niter){
    ap <- rnorm(1, a, prop_sd[1]); lp <- logpost(ap,b)
    if (is.finite(lp) && log(runif(1)) < lp - cur){ a <- ap; cur <- lp; kacc_a <- kacc_a+1L }
    bp <- rnorm(1, b, prop_sd[2]); lp <- logpost(a,bp)
    if (is.finite(lp) && log(runif(1)) < lp - cur){ b <- bp; cur <- lp; kacc_b <- kacc_b+1L }
    if (it %in% keep){ jj <- jj+1L; out[jj,] <- c(exp(a), exp(b)) }
  }

  list(draws=out,
       accept=c(a=kacc_a/niter, b=kacc_b/niter),
       mean=colMeans(out),
       sd=apply(out,2,sd),
       ci95=apply(out,2,quantile, probs=c(.025,.975)))
}

fit_bayes <- bayes_hlgpl(
  x=data,
  niter=15000, burn=5000, thin=10,
  start=c(theta=theta_hat, beta=beta_hat),   # start at the MLE
  prior=list(kt=1, lt=0.10, kb=1, lb=0.10),
  prop_sd=c(0.25, 0.25),
  seed=7
)
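## Optional MCMC diagnostics (a sketch, not part of the original output):
## trace plots plus a crude effective-sample-size estimate from the ACF.
op2 <- par(mfrow = c(1, 2))
plot(fit_bayes$draws[, "theta"], type = "l", xlab = "Kept draw",
     ylab = expression(theta), main = "Trace: theta")
plot(fit_bayes$draws[, "beta"], type = "l", xlab = "Kept draw",
     ylab = expression(beta), main = "Trace: beta")
par(op2)
ess <- apply(fit_bayes$draws, 2, function(d){
  r <- acf(d, plot = FALSE)$acf[-1]            # autocorrelations, lag >= 1
  length(d) / (1 + 2 * sum(pmax(r, 0)))        # crude positive-ACF truncation
})
cat(sprintf("Approx. ESS: theta = %.0f, beta = %.0f\n", ess[1], ess[2]))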

cat("\n=== MLE RESULTS — HL–G PL ===\n")
cat(sprintf("theta_hat = %.6f (SE= %s)\n", theta_hat, ifelse(is.finite(se_theta), sprintf("%.6f",se_theta),"NA")))
cat(sprintf("beta_hat  = %.6f (SE= %s)\n", beta_hat,  ifelse(is.finite(se_beta),  sprintf("%.6f",se_beta), "NA")))
cat(sprintf("LogL= %.6f | AIC= %.6f | BIC= %.6f | HQIC= %.6f | CAIC= %.6f\n", logL, AIC, BIC, HQIC, CAIC))
cat(sprintf("KS: D= %.6f, p= %.6f\n", ks_res$statistic, ks_res$p.value))
cat(sprintf("AD: A^2= %.6f, p= %s\n", ad_stat, ifelse(is.na(ad_p),"NA",sprintf("%.6f",ad_p))))

cat("\n=== Bayesian (RW–MH) RESULTS — HL–G PL ===\n")
cat(sprintf("Accept rates  : a=%.3f, b=%.3f\n", fit_bayes$accept[1], fit_bayes$accept[2]))
cat(sprintf("Posterior mean: theta=%.6f, beta=%.6f\n", fit_bayes$mean[1], fit_bayes$mean[2]))
cat(sprintf("Posterior sd  : theta=%.6f, beta=%.6f\n", fit_bayes$sd[1], fit_bayes$sd[2]))
cat(sprintf("95%% CrI theta : [%.6f, %.6f]\n", fit_bayes$ci95[1,1], fit_bayes$ci95[2,1]))
cat(sprintf("95%% CrI beta  : [%.6f, %.6f]\n", fit_bayes$ci95[1,2], fit_bayes$ci95[2,2]))

tab_compare <- data.frame(
  Parameter         = c("θ","β"),
  MLE_Estimate      = c(theta_hat, beta_hat),
  MLE_SE            = c(se_theta,  se_beta),
  Bayesian_Estimate = c(fit_bayes$mean[1], fit_bayes$mean[2]),
  Bayesian_SE       = c(fit_bayes$sd[1],   fit_bayes$sd[2]),
  check.names = FALSE
)
num_cols <- sapply(tab_compare, is.numeric)
tab_rounded <- tab_compare
tab_rounded[ , num_cols] <- lapply(tab_rounded[ , num_cols], function(v) round(v, 6))

cat("\n=== Table: MLE vs Bayesian (Estimates & SE) ===\n")
print(tab_rounded, row.names = FALSE)
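## Optional export (a sketch; the filename "hlgpl_mle_vs_bayes.csv" is an
## assumption, not part of the original record):
write.csv(tab_rounded, "hlgpl_mle_vs_bayes.csv", row.names = FALSE)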

Files

Simulation and dataset-application code for the distribution The Half Logistic Generalized Power Lindley Distribution Theory and Applications.txt